diff --git a/.nojekyll b/.nojekyll
new file mode 100644
index 00000000..e69de29b
diff --git a/404.html b/404.html
new file mode 100644
index 00000000..1f9ebb7d
--- /dev/null
+++ b/404.html
@@ -0,0 +1,4400 @@
404 - Not found
\ No newline at end of file
diff --git a/about/index.html b/about/index.html
new file mode 100644
index 00000000..b09d1142
--- /dev/null
+++ b/about/index.html
@@ -0,0 +1,4492 @@
About NERC

We are currently in the pilot phase of the project and are focusing on
developing the technology to make it easy for researchers to take advantage of
a suite of services (IaaS, PaaS, SaaS) that are not readily available today.
This includes:

1. Creating the building blocks needed for production cloud services

2. Beginning collaboration with Systems Engineers from other institutions with
   well-established RC groups

3. On-boarding select proof-of-concept use cases from institutions within the
   MGHPCC consortium and other institutions within Massachusetts

The longer-term objectives will center on:

1. Engaging with various OpenStack communities by sharing best practices and
   setting standards for deployments

2. Connecting regularly with the Mass Open Cloud (MOC) leadership to understand
   when new technologies they are developing with Red Hat, Inc. – and as part
   of the new NSF-funded Open Cloud Testbed – might be ready for adoption into
   the production NERC environment

3. Broadening the local deployment team of NERC to include partner universities
   within the MGHPCC consortium.

Figure 1: NERC Overview

NERC production services (red) stand on top of the existing NESE storage
services (blue) that are built on the strong foundation of MGHPCC (green),
which provides core facility and network access. The Innovation Hub (grey)
enables new technologies to be rapidly adopted by the NERC or NESE services.
On the far left (purple) are the Research and Learning communities, the
primary customers of NERC. As users proceed down the stack of production
services from Web-apps toward services that require more technical skills, the
Cloud Facilitators (orange) in the middle guide and educate users on how best
to use the services.

For more information, view NERC's concept document.
\ No newline at end of file
diff --git a/assets/images/MGHPCC_logo.png b/assets/images/MGHPCC_logo.png
new file mode 100644
index 00000000..a0b1d002
Binary files /dev/null and b/assets/images/MGHPCC_logo.png differ
diff --git a/assets/images/boston-university-logo.png b/assets/images/boston-university-logo.png
new file mode 100644
index 00000000..466e9889
Binary files /dev/null and b/assets/images/boston-university-logo.png differ
diff --git a/assets/images/favicon.ico b/assets/images/favicon.ico
new file mode 100644
index 00000000..d86d064d
Binary files /dev/null and b/assets/images/favicon.ico differ
diff --git a/assets/images/favicon.png b/assets/images/favicon.png
new file mode 100644
index 00000000..1cf13b9f
Binary files /dev/null and b/assets/images/favicon.png differ
diff --git a/assets/images/harvard-university_logo.png b/assets/images/harvard-university_logo.png
new file mode 100644
index 00000000..1b6b3272
Binary files /dev/null and b/assets/images/harvard-university_logo.png differ
diff --git a/assets/images/logo.png b/assets/images/logo.png
new file mode 100644
index 00000000..f955141b
Binary files /dev/null and b/assets/images/logo.png differ
diff --git a/assets/images/logo_original.png b/assets/images/logo_original.png
new file mode 100644
index 00000000..f955141b
Binary files /dev/null and b/assets/images/logo_original.png differ
diff --git a/assets/javascripts/bundle.fe8b6f2b.min.js b/assets/javascripts/bundle.fe8b6f2b.min.js
new file mode 100644
index 00000000..cf778d42
e.type===3}function ni(e,t){let r=vn(e);return S(I(location.protocol!=="file:"),Ve("search")).pipe(Ae(o=>o),v(()=>t)).subscribe(({config:o,docs:n})=>r.next({type:0,data:{config:o,docs:n,options:{suggest:B("search.suggest")}}})),r}function ii({document$:e}){let t=ye(),r=Ne(new URL("../versions.json",t.base)).pipe(ve(()=>O)),o=r.pipe(m(n=>{let[,i]=t.base.match(/([^/]+)\/?$/);return n.find(({version:a,aliases:s})=>a===i||s.includes(i))||n[0]}));r.pipe(m(n=>new Map(n.map(i=>[`${new URL(`../${i.version}/`,t.base)}`,i]))),v(n=>d(document.body,"click").pipe(b(i=>!i.metaKey&&!i.ctrlKey),ee(o),v(([i,a])=>{if(i.target instanceof Element){let s=i.target.closest("a");if(s&&!s.target&&n.has(s.href)){let p=s.href;return!i.target.closest(".md-version")&&n.get(p)===a?O:(i.preventDefault(),I(p))}}return O}),v(i=>ur(new URL(i)).pipe(m(a=>{let p=xe().href.replace(t.base,i);return a.has(p.split("#")[0])?new URL(p):new URL(i)})))))).subscribe(n=>pt(n,!0)),z([r,o]).subscribe(([n,i])=>{P(".md-header__topic").appendChild(Mn(n,i))}),e.pipe(v(()=>o)).subscribe(n=>{var a;let i=__md_get("__outdated",sessionStorage);if(i===null){i=!0;let s=((a=t.version)==null?void 0:a.default)||"latest";Array.isArray(s)||(s=[s]);e:for(let p of s)for(let c of n.aliases.concat(n.version))if(new RegExp(p,"i").test(c)){i=!1;break e}__md_set("__outdated",i,sessionStorage)}if(i)for(let s of ae("outdated"))s.hidden=!1})}function ns(e,{worker$:t}){let{searchParams:r}=xe();r.has("q")&&(Je("search",!0),e.value=r.get("q"),e.focus(),Ve("search").pipe(Ae(i=>!i)).subscribe(()=>{let i=xe();i.searchParams.delete("q"),history.replaceState({},"",`${i}`)}));let o=et(e),n=S(t.pipe(Ae(It)),d(e,"keyup"),o).pipe(m(()=>e.value),K());return z([n,o]).pipe(m(([i,a])=>({value:i,focus:a})),G(1))}function ai(e,{worker$:t}){let r=new g,o=r.pipe(X(),ne(!0));z([t.pipe(Ae(It)),r],(i,a)=>a).pipe(Z("value")).subscribe(({value:i})=>t.next({type:2,data:i})),r.pipe(Z("focus")).subscribe(({focus:i})=>{i&&Je("search",i)}),d(e.form,"reset").pipe(U(o)).subscribe(()=>e.focus());let n=P("header [for=__search]");return d(n,"click").subscribe(()=>e.focus()),ns(e,{worker$:t}).pipe(E(i=>r.next(i)),L(()=>r.complete()),m(i=>R({ref:e},i)),G(1))}function si(e,{worker$:t,query$:r}){let o=new g,n=tn(e.parentElement).pipe(b(Boolean)),i=e.parentElement,a=P(":scope > :first-child",e),s=P(":scope > :last-child",e);Ve("search").subscribe(l=>s.setAttribute("role",l?"list":"presentation")),o.pipe(ee(r),Ur(t.pipe(Ae(It)))).subscribe(([{items:l},{value:f}])=>{switch(l.length){case 0:a.textContent=f.length?Ee("search.result.none"):Ee("search.result.placeholder");break;case 1:a.textContent=Ee("search.result.one");break;default:let u=sr(l.length);a.textContent=Ee("search.result.other",u)}});let p=o.pipe(E(()=>s.innerHTML=""),v(({items:l})=>S(I(...l.slice(0,10)),I(...l.slice(10)).pipe(Ye(4),Vr(n),v(([f])=>f)))),m(Tn),pe());return p.subscribe(l=>s.appendChild(l)),p.pipe(oe(l=>{let f=fe("details",l);return typeof f=="undefined"?O:d(f,"toggle").pipe(U(o),m(()=>f))})).subscribe(l=>{l.open===!1&&l.offsetTop<=i.scrollTop&&i.scrollTo({top:l.offsetTop})}),t.pipe(b(dr),m(({data:l})=>l)).pipe(E(l=>o.next(l)),L(()=>o.complete()),m(l=>R({ref:e},l)))}function is(e,{query$:t}){return t.pipe(m(({value:r})=>{let o=xe();return o.hash="",r=r.replace(/\s+/g,"+").replace(/&/g,"%26").replace(/=/g,"%3D"),o.search=`q=${r}`,{url:o}}))}function ci(e,t){let r=new g,o=r.pipe(X(),ne(!0));return 
r.subscribe(({url:n})=>{e.setAttribute("data-clipboard-text",e.href),e.href=`${n}`}),d(e,"click").pipe(U(o)).subscribe(n=>n.preventDefault()),is(e,t).pipe(E(n=>r.next(n)),L(()=>r.complete()),m(n=>R({ref:e},n)))}function pi(e,{worker$:t,keyboard$:r}){let o=new g,n=Se("search-query"),i=S(d(n,"keydown"),d(n,"focus")).pipe(be(se),m(()=>n.value),K());return o.pipe(We(i),m(([{suggest:s},p])=>{let c=p.split(/([\s-]+)/);if(s!=null&&s.length&&c[c.length-1]){let l=s[s.length-1];l.startsWith(c[c.length-1])&&(c[c.length-1]=l)}else c.length=0;return c})).subscribe(s=>e.innerHTML=s.join("").replace(/\s/g," ")),r.pipe(b(({mode:s})=>s==="search")).subscribe(s=>{switch(s.type){case"ArrowRight":e.innerText.length&&n.selectionStart===n.value.length&&(n.value=e.innerText);break}}),t.pipe(b(dr),m(({data:s})=>s)).pipe(E(s=>o.next(s)),L(()=>o.complete()),m(()=>({ref:e})))}function li(e,{index$:t,keyboard$:r}){let o=ye();try{let n=ni(o.search,t),i=Se("search-query",e),a=Se("search-result",e);d(e,"click").pipe(b(({target:p})=>p instanceof Element&&!!p.closest("a"))).subscribe(()=>Je("search",!1)),r.pipe(b(({mode:p})=>p==="search")).subscribe(p=>{let c=Re();switch(p.type){case"Enter":if(c===i){let l=new Map;for(let f of $(":first-child [href]",a)){let u=f.firstElementChild;l.set(f,parseFloat(u.getAttribute("data-md-score")))}if(l.size){let[[f]]=[...l].sort(([,u],[,h])=>h-u);f.click()}p.claim()}break;case"Escape":case"Tab":Je("search",!1),i.blur();break;case"ArrowUp":case"ArrowDown":if(typeof c=="undefined")i.focus();else{let l=[i,...$(":not(details) > [href], summary, details[open] [href]",a)],f=Math.max(0,(Math.max(0,l.indexOf(c))+l.length+(p.type==="ArrowUp"?-1:1))%l.length);l[f].focus()}p.claim();break;default:i!==Re()&&i.focus()}}),r.pipe(b(({mode:p})=>p==="global")).subscribe(p=>{switch(p.type){case"f":case"s":case"/":i.focus(),i.select(),p.claim();break}});let s=ai(i,{worker$:n});return S(s,si(a,{worker$:n,query$:s})).pipe(Pe(...ae("search-share",e).map(p=>ci(p,{query$:s})),...ae("search-suggest",e).map(p=>pi(p,{worker$:n,keyboard$:r}))))}catch(n){return e.hidden=!0,Ke}}function mi(e,{index$:t,location$:r}){return z([t,r.pipe(Q(xe()),b(o=>!!o.searchParams.get("h")))]).pipe(m(([o,n])=>oi(o.config)(n.searchParams.get("h"))),m(o=>{var a;let n=new Map,i=document.createNodeIterator(e,NodeFilter.SHOW_TEXT);for(let s=i.nextNode();s;s=i.nextNode())if((a=s.parentElement)!=null&&a.offsetHeight){let p=s.textContent,c=o(p);c.length>p.length&&n.set(s,c)}for(let[s,p]of n){let{childNodes:c}=x("span",null,p);s.replaceWith(...Array.from(c))}return{ref:e,nodes:n}}))}function as(e,{viewport$:t,main$:r}){let o=e.closest(".md-grid"),n=o.offsetTop-o.parentElement.offsetTop;return z([r,t]).pipe(m(([{offset:i,height:a},{offset:{y:s}}])=>(a=a+Math.min(n,Math.max(0,s-i))-n,{height:a,locked:s>=i+n})),K((i,a)=>i.height===a.height&&i.locked===a.locked))}function Jr(e,o){var n=o,{header$:t}=n,r=io(n,["header$"]);let i=P(".md-sidebar__scrollwrap",e),{y:a}=Ue(i);return C(()=>{let s=new g,p=s.pipe(X(),ne(!0)),c=s.pipe(Le(0,me));return c.pipe(ee(t)).subscribe({next([{height:l},{height:f}]){i.style.height=`${l-2*a}px`,e.style.top=`${f}px`},complete(){i.style.height="",e.style.top=""}}),c.pipe(Ae()).subscribe(()=>{for(let l of $(".md-nav__link--active[href]",e)){if(!l.clientHeight)continue;let f=l.closest(".md-sidebar__scrollwrap");if(typeof f!="undefined"){let u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2})}}}),ue($("label[tabindex]",e)).pipe(oe(l=>d(l,"click").pipe(be(se),m(()=>l),U(p)))).subscribe(l=>{let 
f=P(`[id="${l.htmlFor}"]`);P(`[aria-labelledby="${l.id}"]`).setAttribute("aria-expanded",`${f.checked}`)}),as(e,r).pipe(E(l=>s.next(l)),L(()=>s.complete()),m(l=>R({ref:e},l)))})}function fi(e,t){if(typeof t!="undefined"){let r=`https://api.github.com/repos/${e}/${t}`;return Ct(Ne(`${r}/releases/latest`).pipe(ve(()=>O),m(o=>({version:o.tag_name})),Be({})),Ne(r).pipe(ve(()=>O),m(o=>({stars:o.stargazers_count,forks:o.forks_count})),Be({}))).pipe(m(([o,n])=>R(R({},o),n)))}else{let r=`https://api.github.com/users/${e}`;return Ne(r).pipe(m(o=>({repositories:o.public_repos})),Be({}))}}function ui(e,t){let r=`https://${e}/api/v4/projects/${encodeURIComponent(t)}`;return Ne(r).pipe(ve(()=>O),m(({star_count:o,forks_count:n})=>({stars:o,forks:n})),Be({}))}function di(e){let t=e.match(/^.+github\.com\/([^/]+)\/?([^/]+)?/i);if(t){let[,r,o]=t;return fi(r,o)}if(t=e.match(/^.+?([^/]*gitlab[^/]+)\/(.+?)\/?$/i),t){let[,r,o]=t;return ui(r,o)}return O}var ss;function cs(e){return ss||(ss=C(()=>{let t=__md_get("__source",sessionStorage);if(t)return I(t);if(ae("consent").length){let o=__md_get("__consent");if(!(o&&o.github))return O}return di(e.href).pipe(E(o=>__md_set("__source",o,sessionStorage)))}).pipe(ve(()=>O),b(t=>Object.keys(t).length>0),m(t=>({facts:t})),G(1)))}function hi(e){let t=P(":scope > :last-child",e);return C(()=>{let r=new g;return r.subscribe(({facts:o})=>{t.appendChild(Sn(o)),t.classList.add("md-source__repository--active")}),cs(e).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ps(e,{viewport$:t,header$:r}){return ge(document.body).pipe(v(()=>mr(e,{header$:r,viewport$:t})),m(({offset:{y:o}})=>({hidden:o>=10})),Z("hidden"))}function bi(e,t){return C(()=>{let r=new g;return r.subscribe({next({hidden:o}){e.hidden=o},complete(){e.hidden=!1}}),(B("navigation.tabs.sticky")?I({hidden:!1}):ps(e,t)).pipe(E(o=>r.next(o)),L(()=>r.complete()),m(o=>R({ref:e},o)))})}function ls(e,{viewport$:t,header$:r}){let o=new Map,n=$(".md-nav__link",e);for(let s of n){let p=decodeURIComponent(s.hash.substring(1)),c=fe(`[id="${p}"]`);typeof c!="undefined"&&o.set(s,c)}let i=r.pipe(Z("height"),m(({height:s})=>{let p=Se("main"),c=P(":scope > :first-child",p);return s+.8*(c.offsetTop-p.offsetTop)}),pe());return ge(document.body).pipe(Z("height"),v(s=>C(()=>{let p=[];return I([...o].reduce((c,[l,f])=>{for(;p.length&&o.get(p[p.length-1]).tagName>=f.tagName;)p.pop();let u=f.offsetTop;for(;!u&&f.parentElement;)f=f.parentElement,u=f.offsetTop;let h=f.offsetParent;for(;h;h=h.offsetParent)u+=h.offsetTop;return c.set([...p=[...p,l]].reverse(),u)},new Map))}).pipe(m(p=>new Map([...p].sort(([,c],[,l])=>c-l))),We(i),v(([p,c])=>t.pipe(jr(([l,f],{offset:{y:u},size:h})=>{let w=u+h.height>=Math.floor(s.height);for(;f.length;){let[,A]=f[0];if(A-c=u&&!w)f=[l.pop(),...f];else break}return[l,f]},[[],[...p]]),K((l,f)=>l[0]===f[0]&&l[1]===f[1])))))).pipe(m(([s,p])=>({prev:s.map(([c])=>c),next:p.map(([c])=>c)})),Q({prev:[],next:[]}),Ye(2,1),m(([s,p])=>s.prev.length{let i=new g,a=i.pipe(X(),ne(!0));if(i.subscribe(({prev:s,next:p})=>{for(let[c]of p)c.classList.remove("md-nav__link--passed"),c.classList.remove("md-nav__link--active");for(let[c,[l]]of s.entries())l.classList.add("md-nav__link--passed"),l.classList.toggle("md-nav__link--active",c===s.length-1)}),B("toc.follow")){let s=S(t.pipe(_e(1),m(()=>{})),t.pipe(_e(250),m(()=>"smooth")));i.pipe(b(({prev:p})=>p.length>0),We(o.pipe(be(se))),ee(s)).subscribe(([[{prev:p}],c])=>{let[l]=p[p.length-1];if(l.offsetHeight){let f=cr(l);if(typeof f!="undefined"){let 
u=l.offsetTop-f.offsetTop,{height:h}=ce(f);f.scrollTo({top:u-h/2,behavior:c})}}})}return B("navigation.tracking")&&t.pipe(U(a),Z("offset"),_e(250),Ce(1),U(n.pipe(Ce(1))),st({delay:250}),ee(i)).subscribe(([,{prev:s}])=>{let p=xe(),c=s[s.length-1];if(c&&c.length){let[l]=c,{hash:f}=new URL(l.href);p.hash!==f&&(p.hash=f,history.replaceState({},"",`${p}`))}else p.hash="",history.replaceState({},"",`${p}`)}),ls(e,{viewport$:t,header$:r}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))})}function ms(e,{viewport$:t,main$:r,target$:o}){let n=t.pipe(m(({offset:{y:a}})=>a),Ye(2,1),m(([a,s])=>a>s&&s>0),K()),i=r.pipe(m(({active:a})=>a));return z([i,n]).pipe(m(([a,s])=>!(a&&s)),K(),U(o.pipe(Ce(1))),ne(!0),st({delay:250}),m(a=>({hidden:a})))}function gi(e,{viewport$:t,header$:r,main$:o,target$:n}){let i=new g,a=i.pipe(X(),ne(!0));return i.subscribe({next({hidden:s}){e.hidden=s,s?(e.setAttribute("tabindex","-1"),e.blur()):e.removeAttribute("tabindex")},complete(){e.style.top="",e.hidden=!0,e.removeAttribute("tabindex")}}),r.pipe(U(a),Z("height")).subscribe(({height:s})=>{e.style.top=`${s+16}px`}),d(e,"click").subscribe(s=>{s.preventDefault(),window.scrollTo({top:0})}),ms(e,{viewport$:t,main$:o,target$:n}).pipe(E(s=>i.next(s)),L(()=>i.complete()),m(s=>R({ref:e},s)))}function xi({document$:e,viewport$:t}){e.pipe(v(()=>$(".md-ellipsis")),oe(r=>tt(r).pipe(U(e.pipe(Ce(1))),b(o=>o),m(()=>r),Te(1))),b(r=>r.offsetWidth{let o=r.innerText,n=r.closest("a")||r;return n.title=o,B("content.tooltips")?lt(n,{viewport$:t}).pipe(U(e.pipe(Ce(1))),L(()=>n.removeAttribute("title"))):O})).subscribe(),B("content.tooltips")&&e.pipe(v(()=>$(".md-status")),oe(r=>lt(r,{viewport$:t}))).subscribe()}function yi({document$:e,tablet$:t}){e.pipe(v(()=>$(".md-toggle--indeterminate")),E(r=>{r.indeterminate=!0,r.checked=!1}),oe(r=>d(r,"change").pipe(Dr(()=>r.classList.contains("md-toggle--indeterminate")),m(()=>r))),ee(t)).subscribe(([r,o])=>{r.classList.remove("md-toggle--indeterminate"),o&&(r.checked=!1)})}function fs(){return/(iPad|iPhone|iPod)/.test(navigator.userAgent)}function Ei({document$:e}){e.pipe(v(()=>$("[data-md-scrollfix]")),E(t=>t.removeAttribute("data-md-scrollfix")),b(fs),oe(t=>d(t,"touchstart").pipe(m(()=>t)))).subscribe(t=>{let r=t.scrollTop;r===0?t.scrollTop=1:r+t.offsetHeight===t.scrollHeight&&(t.scrollTop=r-1)})}function wi({viewport$:e,tablet$:t}){z([Ve("search"),t]).pipe(m(([r,o])=>r&&!o),v(r=>I(r).pipe(Ge(r?400:100))),ee(e)).subscribe(([r,{offset:{y:o}}])=>{if(r)document.body.setAttribute("data-md-scrolllock",""),document.body.style.top=`-${o}px`;else{let n=-1*parseInt(document.body.style.top,10);document.body.removeAttribute("data-md-scrolllock"),document.body.style.top="",n&&window.scrollTo(0,n)}})}Object.entries||(Object.entries=function(e){let t=[];for(let r of Object.keys(e))t.push([r,e[r]]);return t});Object.values||(Object.values=function(e){let t=[];for(let r of Object.keys(e))t.push(e[r]);return t});typeof Element!="undefined"&&(Element.prototype.scrollTo||(Element.prototype.scrollTo=function(e,t){typeof e=="object"?(this.scrollLeft=e.left,this.scrollTop=e.top):(this.scrollLeft=e,this.scrollTop=t)}),Element.prototype.replaceWith||(Element.prototype.replaceWith=function(...e){let t=this.parentNode;if(t){e.length===0&&t.removeChild(this);for(let r=e.length-1;r>=0;r--){let o=e[r];typeof o=="string"?o=document.createTextNode(o):o.parentNode&&o.parentNode.removeChild(o),r?t.insertBefore(this.previousSibling,o):t.replaceChild(o,this)}}}));function us(){return location.protocol==="file:"?wt(`${new 
URL("search/search_index.js",Xr.base)}`).pipe(m(()=>__index),G(1)):Ne(new URL("search/search_index.json",Xr.base))}document.documentElement.classList.remove("no-js");document.documentElement.classList.add("js");var ot=Yo(),jt=nn(),Ot=cn(jt),Zr=on(),Oe=bn(),hr=$t("(min-width: 960px)"),Si=$t("(min-width: 1220px)"),Oi=pn(),Xr=ye(),Mi=document.forms.namedItem("search")?us():Ke,eo=new g;Bn({alert$:eo});var to=new g;B("navigation.instant")&&Zn({location$:jt,viewport$:Oe,progress$:to}).subscribe(ot);var Ti;((Ti=Xr.version)==null?void 0:Ti.provider)==="mike"&&ii({document$:ot});S(jt,Ot).pipe(Ge(125)).subscribe(()=>{Je("drawer",!1),Je("search",!1)});Zr.pipe(b(({mode:e})=>e==="global")).subscribe(e=>{switch(e.type){case"p":case",":let t=fe("link[rel=prev]");typeof t!="undefined"&&pt(t);break;case"n":case".":let r=fe("link[rel=next]");typeof r!="undefined"&&pt(r);break;case"Enter":let o=Re();o instanceof HTMLLabelElement&&o.click()}});xi({viewport$:Oe,document$:ot});yi({document$:ot,tablet$:hr});Ei({document$:ot});wi({viewport$:Oe,tablet$:hr});var rt=Nn(Se("header"),{viewport$:Oe}),Ft=ot.pipe(m(()=>Se("main")),v(e=>Qn(e,{viewport$:Oe,header$:rt})),G(1)),ds=S(...ae("consent").map(e=>xn(e,{target$:Ot})),...ae("dialog").map(e=>Dn(e,{alert$:eo})),...ae("header").map(e=>zn(e,{viewport$:Oe,header$:rt,main$:Ft})),...ae("palette").map(e=>Kn(e)),...ae("progress").map(e=>Yn(e,{progress$:to})),...ae("search").map(e=>li(e,{index$:Mi,keyboard$:Zr})),...ae("source").map(e=>hi(e))),hs=C(()=>S(...ae("announce").map(e=>gn(e)),...ae("content").map(e=>Un(e,{viewport$:Oe,target$:Ot,print$:Oi})),...ae("content").map(e=>B("search.highlight")?mi(e,{index$:Mi,location$:jt}):O),...ae("header-title").map(e=>qn(e,{viewport$:Oe,header$:rt})),...ae("sidebar").map(e=>e.getAttribute("data-md-type")==="navigation"?Nr(Si,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft})):Nr(hr,()=>Jr(e,{viewport$:Oe,header$:rt,main$:Ft}))),...ae("tabs").map(e=>bi(e,{viewport$:Oe,header$:rt})),...ae("toc").map(e=>vi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})),...ae("top").map(e=>gi(e,{viewport$:Oe,header$:rt,main$:Ft,target$:Ot})))),Li=ot.pipe(v(()=>hs),Pe(ds),G(1));Li.subscribe();window.document$=ot;window.location$=jt;window.target$=Ot;window.keyboard$=Zr;window.viewport$=Oe;window.tablet$=hr;window.screen$=Si;window.print$=Oi;window.alert$=eo;window.progress$=to;window.component$=Li;})(); +//# sourceMappingURL=bundle.fe8b6f2b.min.js.map + diff --git a/assets/javascripts/bundle.fe8b6f2b.min.js.map b/assets/javascripts/bundle.fe8b6f2b.min.js.map new file mode 100644 index 00000000..82635852 --- /dev/null +++ b/assets/javascripts/bundle.fe8b6f2b.min.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["node_modules/focus-visible/dist/focus-visible.js", "node_modules/clipboard/dist/clipboard.js", "node_modules/escape-html/index.js", "src/templates/assets/javascripts/bundle.ts", "node_modules/rxjs/node_modules/tslib/tslib.es6.js", "node_modules/rxjs/src/internal/util/isFunction.ts", "node_modules/rxjs/src/internal/util/createErrorClass.ts", "node_modules/rxjs/src/internal/util/UnsubscriptionError.ts", "node_modules/rxjs/src/internal/util/arrRemove.ts", "node_modules/rxjs/src/internal/Subscription.ts", "node_modules/rxjs/src/internal/config.ts", "node_modules/rxjs/src/internal/scheduler/timeoutProvider.ts", "node_modules/rxjs/src/internal/util/reportUnhandledError.ts", "node_modules/rxjs/src/internal/util/noop.ts", "node_modules/rxjs/src/internal/NotificationFactories.ts", "node_modules/rxjs/src/internal/util/errorContext.ts", 
"node_modules/rxjs/src/internal/Subscriber.ts", "node_modules/rxjs/src/internal/symbol/observable.ts", "node_modules/rxjs/src/internal/util/identity.ts", "node_modules/rxjs/src/internal/util/pipe.ts", "node_modules/rxjs/src/internal/Observable.ts", "node_modules/rxjs/src/internal/util/lift.ts", "node_modules/rxjs/src/internal/operators/OperatorSubscriber.ts", "node_modules/rxjs/src/internal/scheduler/animationFrameProvider.ts", "node_modules/rxjs/src/internal/util/ObjectUnsubscribedError.ts", "node_modules/rxjs/src/internal/Subject.ts", "node_modules/rxjs/src/internal/BehaviorSubject.ts", "node_modules/rxjs/src/internal/scheduler/dateTimestampProvider.ts", "node_modules/rxjs/src/internal/ReplaySubject.ts", "node_modules/rxjs/src/internal/scheduler/Action.ts", "node_modules/rxjs/src/internal/scheduler/intervalProvider.ts", "node_modules/rxjs/src/internal/scheduler/AsyncAction.ts", "node_modules/rxjs/src/internal/Scheduler.ts", "node_modules/rxjs/src/internal/scheduler/AsyncScheduler.ts", "node_modules/rxjs/src/internal/scheduler/async.ts", "node_modules/rxjs/src/internal/scheduler/QueueAction.ts", "node_modules/rxjs/src/internal/scheduler/QueueScheduler.ts", "node_modules/rxjs/src/internal/scheduler/queue.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameAction.ts", "node_modules/rxjs/src/internal/scheduler/AnimationFrameScheduler.ts", "node_modules/rxjs/src/internal/scheduler/animationFrame.ts", "node_modules/rxjs/src/internal/observable/empty.ts", "node_modules/rxjs/src/internal/util/isScheduler.ts", "node_modules/rxjs/src/internal/util/args.ts", "node_modules/rxjs/src/internal/util/isArrayLike.ts", "node_modules/rxjs/src/internal/util/isPromise.ts", "node_modules/rxjs/src/internal/util/isInteropObservable.ts", "node_modules/rxjs/src/internal/util/isAsyncIterable.ts", "node_modules/rxjs/src/internal/util/throwUnobservableError.ts", "node_modules/rxjs/src/internal/symbol/iterator.ts", "node_modules/rxjs/src/internal/util/isIterable.ts", "node_modules/rxjs/src/internal/util/isReadableStreamLike.ts", "node_modules/rxjs/src/internal/observable/innerFrom.ts", "node_modules/rxjs/src/internal/util/executeSchedule.ts", "node_modules/rxjs/src/internal/operators/observeOn.ts", "node_modules/rxjs/src/internal/operators/subscribeOn.ts", "node_modules/rxjs/src/internal/scheduled/scheduleObservable.ts", "node_modules/rxjs/src/internal/scheduled/schedulePromise.ts", "node_modules/rxjs/src/internal/scheduled/scheduleArray.ts", "node_modules/rxjs/src/internal/scheduled/scheduleIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleAsyncIterable.ts", "node_modules/rxjs/src/internal/scheduled/scheduleReadableStreamLike.ts", "node_modules/rxjs/src/internal/scheduled/scheduled.ts", "node_modules/rxjs/src/internal/observable/from.ts", "node_modules/rxjs/src/internal/observable/of.ts", "node_modules/rxjs/src/internal/observable/throwError.ts", "node_modules/rxjs/src/internal/util/EmptyError.ts", "node_modules/rxjs/src/internal/util/isDate.ts", "node_modules/rxjs/src/internal/operators/map.ts", "node_modules/rxjs/src/internal/util/mapOneOrManyArgs.ts", "node_modules/rxjs/src/internal/util/argsArgArrayOrObject.ts", "node_modules/rxjs/src/internal/util/createObject.ts", "node_modules/rxjs/src/internal/observable/combineLatest.ts", "node_modules/rxjs/src/internal/operators/mergeInternals.ts", "node_modules/rxjs/src/internal/operators/mergeMap.ts", "node_modules/rxjs/src/internal/operators/mergeAll.ts", "node_modules/rxjs/src/internal/operators/concatAll.ts", 
"node_modules/rxjs/src/internal/observable/concat.ts", "node_modules/rxjs/src/internal/observable/defer.ts", "node_modules/rxjs/src/internal/observable/fromEvent.ts", "node_modules/rxjs/src/internal/observable/fromEventPattern.ts", "node_modules/rxjs/src/internal/observable/timer.ts", "node_modules/rxjs/src/internal/observable/merge.ts", "node_modules/rxjs/src/internal/observable/never.ts", "node_modules/rxjs/src/internal/util/argsOrArgArray.ts", "node_modules/rxjs/src/internal/operators/filter.ts", "node_modules/rxjs/src/internal/observable/zip.ts", "node_modules/rxjs/src/internal/operators/audit.ts", "node_modules/rxjs/src/internal/operators/auditTime.ts", "node_modules/rxjs/src/internal/operators/bufferCount.ts", "node_modules/rxjs/src/internal/operators/catchError.ts", "node_modules/rxjs/src/internal/operators/scanInternals.ts", "node_modules/rxjs/src/internal/operators/combineLatest.ts", "node_modules/rxjs/src/internal/operators/combineLatestWith.ts", "node_modules/rxjs/src/internal/operators/debounce.ts", "node_modules/rxjs/src/internal/operators/debounceTime.ts", "node_modules/rxjs/src/internal/operators/defaultIfEmpty.ts", "node_modules/rxjs/src/internal/operators/take.ts", "node_modules/rxjs/src/internal/operators/ignoreElements.ts", "node_modules/rxjs/src/internal/operators/mapTo.ts", "node_modules/rxjs/src/internal/operators/delayWhen.ts", "node_modules/rxjs/src/internal/operators/delay.ts", "node_modules/rxjs/src/internal/operators/distinctUntilChanged.ts", "node_modules/rxjs/src/internal/operators/distinctUntilKeyChanged.ts", "node_modules/rxjs/src/internal/operators/throwIfEmpty.ts", "node_modules/rxjs/src/internal/operators/endWith.ts", "node_modules/rxjs/src/internal/operators/finalize.ts", "node_modules/rxjs/src/internal/operators/first.ts", "node_modules/rxjs/src/internal/operators/takeLast.ts", "node_modules/rxjs/src/internal/operators/merge.ts", "node_modules/rxjs/src/internal/operators/mergeWith.ts", "node_modules/rxjs/src/internal/operators/repeat.ts", "node_modules/rxjs/src/internal/operators/scan.ts", "node_modules/rxjs/src/internal/operators/share.ts", "node_modules/rxjs/src/internal/operators/shareReplay.ts", "node_modules/rxjs/src/internal/operators/skip.ts", "node_modules/rxjs/src/internal/operators/skipUntil.ts", "node_modules/rxjs/src/internal/operators/startWith.ts", "node_modules/rxjs/src/internal/operators/switchMap.ts", "node_modules/rxjs/src/internal/operators/takeUntil.ts", "node_modules/rxjs/src/internal/operators/takeWhile.ts", "node_modules/rxjs/src/internal/operators/tap.ts", "node_modules/rxjs/src/internal/operators/throttle.ts", "node_modules/rxjs/src/internal/operators/throttleTime.ts", "node_modules/rxjs/src/internal/operators/withLatestFrom.ts", "node_modules/rxjs/src/internal/operators/zip.ts", "node_modules/rxjs/src/internal/operators/zipWith.ts", "src/templates/assets/javascripts/browser/document/index.ts", "src/templates/assets/javascripts/browser/element/_/index.ts", "src/templates/assets/javascripts/browser/element/focus/index.ts", "src/templates/assets/javascripts/browser/element/hover/index.ts", "src/templates/assets/javascripts/utilities/h/index.ts", "src/templates/assets/javascripts/utilities/round/index.ts", "src/templates/assets/javascripts/browser/script/index.ts", "src/templates/assets/javascripts/browser/element/size/_/index.ts", "src/templates/assets/javascripts/browser/element/size/content/index.ts", "src/templates/assets/javascripts/browser/element/offset/_/index.ts", 
"src/templates/assets/javascripts/browser/element/offset/content/index.ts", "src/templates/assets/javascripts/browser/element/visibility/index.ts", "src/templates/assets/javascripts/browser/toggle/index.ts", "src/templates/assets/javascripts/browser/keyboard/index.ts", "src/templates/assets/javascripts/browser/location/_/index.ts", "src/templates/assets/javascripts/browser/location/hash/index.ts", "src/templates/assets/javascripts/browser/media/index.ts", "src/templates/assets/javascripts/browser/request/index.ts", "src/templates/assets/javascripts/browser/viewport/offset/index.ts", "src/templates/assets/javascripts/browser/viewport/size/index.ts", "src/templates/assets/javascripts/browser/viewport/_/index.ts", "src/templates/assets/javascripts/browser/viewport/at/index.ts", "src/templates/assets/javascripts/browser/worker/index.ts", "src/templates/assets/javascripts/_/index.ts", "src/templates/assets/javascripts/components/_/index.ts", "src/templates/assets/javascripts/components/announce/index.ts", "src/templates/assets/javascripts/components/consent/index.ts", "src/templates/assets/javascripts/templates/tooltip/index.tsx", "src/templates/assets/javascripts/templates/annotation/index.tsx", "src/templates/assets/javascripts/templates/clipboard/index.tsx", "src/templates/assets/javascripts/templates/search/index.tsx", "src/templates/assets/javascripts/templates/source/index.tsx", "src/templates/assets/javascripts/templates/tabbed/index.tsx", "src/templates/assets/javascripts/templates/table/index.tsx", "src/templates/assets/javascripts/templates/version/index.tsx", "src/templates/assets/javascripts/components/tooltip2/index.ts", "src/templates/assets/javascripts/components/content/annotation/_/index.ts", "src/templates/assets/javascripts/components/content/annotation/list/index.ts", "src/templates/assets/javascripts/components/content/annotation/block/index.ts", "src/templates/assets/javascripts/components/content/code/_/index.ts", "src/templates/assets/javascripts/components/content/details/index.ts", "src/templates/assets/javascripts/components/content/mermaid/index.css", "src/templates/assets/javascripts/components/content/mermaid/index.ts", "src/templates/assets/javascripts/components/content/table/index.ts", "src/templates/assets/javascripts/components/content/tabs/index.ts", "src/templates/assets/javascripts/components/content/_/index.ts", "src/templates/assets/javascripts/components/dialog/index.ts", "src/templates/assets/javascripts/components/tooltip/index.ts", "src/templates/assets/javascripts/components/header/_/index.ts", "src/templates/assets/javascripts/components/header/title/index.ts", "src/templates/assets/javascripts/components/main/index.ts", "src/templates/assets/javascripts/components/palette/index.ts", "src/templates/assets/javascripts/components/progress/index.ts", "src/templates/assets/javascripts/integrations/clipboard/index.ts", "src/templates/assets/javascripts/integrations/sitemap/index.ts", "src/templates/assets/javascripts/integrations/instant/index.ts", "src/templates/assets/javascripts/integrations/search/highlighter/index.ts", "src/templates/assets/javascripts/integrations/search/worker/message/index.ts", "src/templates/assets/javascripts/integrations/search/worker/_/index.ts", "src/templates/assets/javascripts/integrations/version/index.ts", "src/templates/assets/javascripts/components/search/query/index.ts", "src/templates/assets/javascripts/components/search/result/index.ts", "src/templates/assets/javascripts/components/search/share/index.ts", 
"src/templates/assets/javascripts/components/search/suggest/index.ts", "src/templates/assets/javascripts/components/search/_/index.ts", "src/templates/assets/javascripts/components/search/highlight/index.ts", "src/templates/assets/javascripts/components/sidebar/index.ts", "src/templates/assets/javascripts/components/source/facts/github/index.ts", "src/templates/assets/javascripts/components/source/facts/gitlab/index.ts", "src/templates/assets/javascripts/components/source/facts/_/index.ts", "src/templates/assets/javascripts/components/source/_/index.ts", "src/templates/assets/javascripts/components/tabs/index.ts", "src/templates/assets/javascripts/components/toc/index.ts", "src/templates/assets/javascripts/components/top/index.ts", "src/templates/assets/javascripts/patches/ellipsis/index.ts", "src/templates/assets/javascripts/patches/indeterminate/index.ts", "src/templates/assets/javascripts/patches/scrollfix/index.ts", "src/templates/assets/javascripts/patches/scrolllock/index.ts", "src/templates/assets/javascripts/polyfills/index.ts"], + "sourcesContent": ["(function (global, factory) {\n typeof exports === 'object' && typeof module !== 'undefined' ? factory() :\n typeof define === 'function' && define.amd ? define(factory) :\n (factory());\n}(this, (function () { 'use strict';\n\n /**\n * Applies the :focus-visible polyfill at the given scope.\n * A scope in this case is either the top-level Document or a Shadow Root.\n *\n * @param {(Document|ShadowRoot)} scope\n * @see https://github.com/WICG/focus-visible\n */\n function applyFocusVisiblePolyfill(scope) {\n var hadKeyboardEvent = true;\n var hadFocusVisibleRecently = false;\n var hadFocusVisibleRecentlyTimeout = null;\n\n var inputTypesAllowlist = {\n text: true,\n search: true,\n url: true,\n tel: true,\n email: true,\n password: true,\n number: true,\n date: true,\n month: true,\n week: true,\n time: true,\n datetime: true,\n 'datetime-local': true\n };\n\n /**\n * Helper function for legacy browsers and iframes which sometimes focus\n * elements like document, body, and non-interactive SVG.\n * @param {Element} el\n */\n function isValidFocusTarget(el) {\n if (\n el &&\n el !== document &&\n el.nodeName !== 'HTML' &&\n el.nodeName !== 'BODY' &&\n 'classList' in el &&\n 'contains' in el.classList\n ) {\n return true;\n }\n return false;\n }\n\n /**\n * Computes whether the given element should automatically trigger the\n * `focus-visible` class being added, i.e. 
whether it should always match\n * `:focus-visible` when focused.\n * @param {Element} el\n * @return {boolean}\n */\n function focusTriggersKeyboardModality(el) {\n var type = el.type;\n var tagName = el.tagName;\n\n if (tagName === 'INPUT' && inputTypesAllowlist[type] && !el.readOnly) {\n return true;\n }\n\n if (tagName === 'TEXTAREA' && !el.readOnly) {\n return true;\n }\n\n if (el.isContentEditable) {\n return true;\n }\n\n return false;\n }\n\n /**\n * Add the `focus-visible` class to the given element if it was not added by\n * the author.\n * @param {Element} el\n */\n function addFocusVisibleClass(el) {\n if (el.classList.contains('focus-visible')) {\n return;\n }\n el.classList.add('focus-visible');\n el.setAttribute('data-focus-visible-added', '');\n }\n\n /**\n * Remove the `focus-visible` class from the given element if it was not\n * originally added by the author.\n * @param {Element} el\n */\n function removeFocusVisibleClass(el) {\n if (!el.hasAttribute('data-focus-visible-added')) {\n return;\n }\n el.classList.remove('focus-visible');\n el.removeAttribute('data-focus-visible-added');\n }\n\n /**\n * If the most recent user interaction was via the keyboard;\n * and the key press did not include a meta, alt/option, or control key;\n * then the modality is keyboard. Otherwise, the modality is not keyboard.\n * Apply `focus-visible` to any current active element and keep track\n * of our keyboard modality state with `hadKeyboardEvent`.\n * @param {KeyboardEvent} e\n */\n function onKeyDown(e) {\n if (e.metaKey || e.altKey || e.ctrlKey) {\n return;\n }\n\n if (isValidFocusTarget(scope.activeElement)) {\n addFocusVisibleClass(scope.activeElement);\n }\n\n hadKeyboardEvent = true;\n }\n\n /**\n * If at any point a user clicks with a pointing device, ensure that we change\n * the modality away from keyboard.\n * This avoids the situation where a user presses a key on an already focused\n * element, and then clicks on a different element, focusing it with a\n * pointing device, while we still think we're in keyboard modality.\n * @param {Event} e\n */\n function onPointerDown(e) {\n hadKeyboardEvent = false;\n }\n\n /**\n * On `focus`, add the `focus-visible` class to the target if:\n * - the target received focus as a result of keyboard navigation, or\n * - the event target is an element that will likely require interaction\n * via the keyboard (e.g. 
a text box)\n * @param {Event} e\n */\n function onFocus(e) {\n // Prevent IE from focusing the document or HTML element.\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (hadKeyboardEvent || focusTriggersKeyboardModality(e.target)) {\n addFocusVisibleClass(e.target);\n }\n }\n\n /**\n * On `blur`, remove the `focus-visible` class from the target.\n * @param {Event} e\n */\n function onBlur(e) {\n if (!isValidFocusTarget(e.target)) {\n return;\n }\n\n if (\n e.target.classList.contains('focus-visible') ||\n e.target.hasAttribute('data-focus-visible-added')\n ) {\n // To detect a tab/window switch, we look for a blur event followed\n // rapidly by a visibility change.\n // If we don't see a visibility change within 100ms, it's probably a\n // regular focus change.\n hadFocusVisibleRecently = true;\n window.clearTimeout(hadFocusVisibleRecentlyTimeout);\n hadFocusVisibleRecentlyTimeout = window.setTimeout(function() {\n hadFocusVisibleRecently = false;\n }, 100);\n removeFocusVisibleClass(e.target);\n }\n }\n\n /**\n * If the user changes tabs, keep track of whether or not the previously\n * focused element had .focus-visible.\n * @param {Event} e\n */\n function onVisibilityChange(e) {\n if (document.visibilityState === 'hidden') {\n // If the tab becomes active again, the browser will handle calling focus\n // on the element (Safari actually calls it twice).\n // If this tab change caused a blur on an element with focus-visible,\n // re-apply the class when the user switches back to the tab.\n if (hadFocusVisibleRecently) {\n hadKeyboardEvent = true;\n }\n addInitialPointerMoveListeners();\n }\n }\n\n /**\n * Add a group of listeners to detect usage of any pointing devices.\n * These listeners will be added when the polyfill first loads, and anytime\n * the window is blurred, so that they are active when the window regains\n * focus.\n */\n function addInitialPointerMoveListeners() {\n document.addEventListener('mousemove', onInitialPointerMove);\n document.addEventListener('mousedown', onInitialPointerMove);\n document.addEventListener('mouseup', onInitialPointerMove);\n document.addEventListener('pointermove', onInitialPointerMove);\n document.addEventListener('pointerdown', onInitialPointerMove);\n document.addEventListener('pointerup', onInitialPointerMove);\n document.addEventListener('touchmove', onInitialPointerMove);\n document.addEventListener('touchstart', onInitialPointerMove);\n document.addEventListener('touchend', onInitialPointerMove);\n }\n\n function removeInitialPointerMoveListeners() {\n document.removeEventListener('mousemove', onInitialPointerMove);\n document.removeEventListener('mousedown', onInitialPointerMove);\n document.removeEventListener('mouseup', onInitialPointerMove);\n document.removeEventListener('pointermove', onInitialPointerMove);\n document.removeEventListener('pointerdown', onInitialPointerMove);\n document.removeEventListener('pointerup', onInitialPointerMove);\n document.removeEventListener('touchmove', onInitialPointerMove);\n document.removeEventListener('touchstart', onInitialPointerMove);\n document.removeEventListener('touchend', onInitialPointerMove);\n }\n\n /**\n * When the polfyill first loads, assume the user is in keyboard modality.\n * If any event is received from a pointing device (e.g. 
mouse, pointer,\n * touch), turn off keyboard modality.\n * This accounts for situations where focus enters the page from the URL bar.\n * @param {Event} e\n */\n function onInitialPointerMove(e) {\n // Work around a Safari quirk that fires a mousemove on whenever the\n // window blurs, even if you're tabbing out of the page. \u00AF\\_(\u30C4)_/\u00AF\n if (e.target.nodeName && e.target.nodeName.toLowerCase() === 'html') {\n return;\n }\n\n hadKeyboardEvent = false;\n removeInitialPointerMoveListeners();\n }\n\n // For some kinds of state, we are interested in changes at the global scope\n // only. For example, global pointer input, global key presses and global\n // visibility change should affect the state at every scope:\n document.addEventListener('keydown', onKeyDown, true);\n document.addEventListener('mousedown', onPointerDown, true);\n document.addEventListener('pointerdown', onPointerDown, true);\n document.addEventListener('touchstart', onPointerDown, true);\n document.addEventListener('visibilitychange', onVisibilityChange, true);\n\n addInitialPointerMoveListeners();\n\n // For focus and blur, we specifically care about state changes in the local\n // scope. This is because focus / blur events that originate from within a\n // shadow root are not re-dispatched from the host element if it was already\n // the active element in its own scope:\n scope.addEventListener('focus', onFocus, true);\n scope.addEventListener('blur', onBlur, true);\n\n // We detect that a node is a ShadowRoot by ensuring that it is a\n // DocumentFragment and also has a host property. This check covers native\n // implementation and polyfill implementation transparently. If we only cared\n // about the native implementation, we could just check if the scope was\n // an instance of a ShadowRoot.\n if (scope.nodeType === Node.DOCUMENT_FRAGMENT_NODE && scope.host) {\n // Since a ShadowRoot is a special kind of DocumentFragment, it does not\n // have a root element to add a class to. So, we add this attribute to the\n // host element instead:\n scope.host.setAttribute('data-js-focus-visible', '');\n } else if (scope.nodeType === Node.DOCUMENT_NODE) {\n document.documentElement.classList.add('js-focus-visible');\n document.documentElement.setAttribute('data-js-focus-visible', '');\n }\n }\n\n // It is important to wrap all references to global window and document in\n // these checks to support server-side rendering use cases\n // @see https://github.com/WICG/focus-visible/issues/199\n if (typeof window !== 'undefined' && typeof document !== 'undefined') {\n // Make the polyfill helper globally available. 
This can be used as a signal\n // to interested libraries that wish to coordinate with the polyfill for e.g.,\n // applying the polyfill to a shadow root:\n window.applyFocusVisiblePolyfill = applyFocusVisiblePolyfill;\n\n // Notify interested libraries of the polyfill's presence, in case the\n // polyfill was loaded lazily:\n var event;\n\n try {\n event = new CustomEvent('focus-visible-polyfill-ready');\n } catch (error) {\n // IE11 does not support using CustomEvent as a constructor directly:\n event = document.createEvent('CustomEvent');\n event.initCustomEvent('focus-visible-polyfill-ready', false, false, {});\n }\n\n window.dispatchEvent(event);\n }\n\n if (typeof document !== 'undefined') {\n // Apply the polyfill to the global document, so that no JavaScript\n // coordination is required to use the polyfill in the top-level document:\n applyFocusVisiblePolyfill(document);\n }\n\n})));\n", "/*!\n * clipboard.js v2.0.11\n * https://clipboardjs.com/\n *\n * Licensed MIT \u00A9 Zeno Rocha\n */\n(function webpackUniversalModuleDefinition(root, factory) {\n\tif(typeof exports === 'object' && typeof module === 'object')\n\t\tmodule.exports = factory();\n\telse if(typeof define === 'function' && define.amd)\n\t\tdefine([], factory);\n\telse if(typeof exports === 'object')\n\t\texports[\"ClipboardJS\"] = factory();\n\telse\n\t\troot[\"ClipboardJS\"] = factory();\n})(this, function() {\nreturn /******/ (function() { // webpackBootstrap\n/******/ \tvar __webpack_modules__ = ({\n\n/***/ 686:\n/***/ (function(__unused_webpack_module, __webpack_exports__, __webpack_require__) {\n\n\"use strict\";\n\n// EXPORTS\n__webpack_require__.d(__webpack_exports__, {\n \"default\": function() { return /* binding */ clipboard; }\n});\n\n// EXTERNAL MODULE: ./node_modules/tiny-emitter/index.js\nvar tiny_emitter = __webpack_require__(279);\nvar tiny_emitter_default = /*#__PURE__*/__webpack_require__.n(tiny_emitter);\n// EXTERNAL MODULE: ./node_modules/good-listener/src/listen.js\nvar listen = __webpack_require__(370);\nvar listen_default = /*#__PURE__*/__webpack_require__.n(listen);\n// EXTERNAL MODULE: ./node_modules/select/src/select.js\nvar src_select = __webpack_require__(817);\nvar select_default = /*#__PURE__*/__webpack_require__.n(src_select);\n;// CONCATENATED MODULE: ./src/common/command.js\n/**\n * Executes a given operation type.\n * @param {String} type\n * @return {Boolean}\n */\nfunction command(type) {\n try {\n return document.execCommand(type);\n } catch (err) {\n return false;\n }\n}\n;// CONCATENATED MODULE: ./src/actions/cut.js\n\n\n/**\n * Cut action wrapper.\n * @param {String|HTMLElement} target\n * @return {String}\n */\n\nvar ClipboardActionCut = function ClipboardActionCut(target) {\n var selectedText = select_default()(target);\n command('cut');\n return selectedText;\n};\n\n/* harmony default export */ var actions_cut = (ClipboardActionCut);\n;// CONCATENATED MODULE: ./src/common/create-fake-element.js\n/**\n * Creates a fake textarea element with a value.\n * @param {String} value\n * @return {HTMLElement}\n */\nfunction createFakeElement(value) {\n var isRTL = document.documentElement.getAttribute('dir') === 'rtl';\n var fakeElement = document.createElement('textarea'); // Prevent zooming on iOS\n\n fakeElement.style.fontSize = '12pt'; // Reset box model\n\n fakeElement.style.border = '0';\n fakeElement.style.padding = '0';\n fakeElement.style.margin = '0'; // Move element out of screen horizontally\n\n fakeElement.style.position = 'absolute';\n fakeElement.style[isRTL ? 
'right' : 'left'] = '-9999px'; // Move element to the same position vertically\n\n var yPosition = window.pageYOffset || document.documentElement.scrollTop;\n fakeElement.style.top = \"\".concat(yPosition, \"px\");\n fakeElement.setAttribute('readonly', '');\n fakeElement.value = value;\n return fakeElement;\n}\n;// CONCATENATED MODULE: ./src/actions/copy.js\n\n\n\n/**\n * Create fake copy action wrapper using a fake element.\n * @param {String} target\n * @param {Object} options\n * @return {String}\n */\n\nvar fakeCopyAction = function fakeCopyAction(value, options) {\n var fakeElement = createFakeElement(value);\n options.container.appendChild(fakeElement);\n var selectedText = select_default()(fakeElement);\n command('copy');\n fakeElement.remove();\n return selectedText;\n};\n/**\n * Copy action wrapper.\n * @param {String|HTMLElement} target\n * @param {Object} options\n * @return {String}\n */\n\n\nvar ClipboardActionCopy = function ClipboardActionCopy(target) {\n var options = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : {\n container: document.body\n };\n var selectedText = '';\n\n if (typeof target === 'string') {\n selectedText = fakeCopyAction(target, options);\n } else if (target instanceof HTMLInputElement && !['text', 'search', 'url', 'tel', 'password'].includes(target === null || target === void 0 ? void 0 : target.type)) {\n // If input type doesn't support `setSelectionRange`. Simulate it. https://developer.mozilla.org/en-US/docs/Web/API/HTMLInputElement/setSelectionRange\n selectedText = fakeCopyAction(target.value, options);\n } else {\n selectedText = select_default()(target);\n command('copy');\n }\n\n return selectedText;\n};\n\n/* harmony default export */ var actions_copy = (ClipboardActionCopy);\n;// CONCATENATED MODULE: ./src/actions/default.js\nfunction _typeof(obj) { \"@babel/helpers - typeof\"; if (typeof Symbol === \"function\" && typeof Symbol.iterator === \"symbol\") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === \"function\" && obj.constructor === Symbol && obj !== Symbol.prototype ? \"symbol\" : typeof obj; }; } return _typeof(obj); }\n\n\n\n/**\n * Inner function which performs selection from either `text` or `target`\n * properties and then executes copy or cut operations.\n * @param {Object} options\n */\n\nvar ClipboardActionDefault = function ClipboardActionDefault() {\n var options = arguments.length > 0 && arguments[0] !== undefined ? arguments[0] : {};\n // Defines base properties passed from constructor.\n var _options$action = options.action,\n action = _options$action === void 0 ? 'copy' : _options$action,\n container = options.container,\n target = options.target,\n text = options.text; // Sets the `action` to be performed which can be either 'copy' or 'cut'.\n\n if (action !== 'copy' && action !== 'cut') {\n throw new Error('Invalid \"action\" value, use either \"copy\" or \"cut\"');\n } // Sets the `target` property using an element that will be have its content copied.\n\n\n if (target !== undefined) {\n if (target && _typeof(target) === 'object' && target.nodeType === 1) {\n if (action === 'copy' && target.hasAttribute('disabled')) {\n throw new Error('Invalid \"target\" attribute. Please use \"readonly\" instead of \"disabled\" attribute');\n }\n\n if (action === 'cut' && (target.hasAttribute('readonly') || target.hasAttribute('disabled'))) {\n throw new Error('Invalid \"target\" attribute. 
take(5) : identity);\n *\n * result$.subscribe({\n * next: console.log\n * });\n * ```\n *\n * @param x Any value that is returned by this function\n * @returns The value passed as the first parameter to this function\n */\nexport function identity(x: T): T {\n return x;\n}\n", "import { identity } from './identity';\nimport { UnaryFunction } from '../types';\n\nexport function pipe(): typeof identity;\nexport function pipe(fn1: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction): UnaryFunction;\nexport function pipe(fn1: UnaryFunction, fn2: UnaryFunction, fn3: UnaryFunction): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction\n): UnaryFunction;\nexport function pipe(\n fn1: UnaryFunction,\n fn2: UnaryFunction,\n fn3: UnaryFunction,\n fn4: UnaryFunction,\n fn5: UnaryFunction,\n fn6: UnaryFunction,\n fn7: UnaryFunction,\n fn8: UnaryFunction,\n fn9: UnaryFunction,\n ...fns: UnaryFunction[]\n): UnaryFunction;\n\n/**\n * pipe() can be called on one or more functions, each of which can take one argument (\"UnaryFunction\")\n * and uses it to return a value.\n * It returns a function that takes one argument, passes it to the first UnaryFunction, and then\n * passes the result to the next one, passes that result to the next one, and so on. \n */\nexport function pipe(...fns: Array>): UnaryFunction {\n return pipeFromArray(fns);\n}\n\n/** @internal */\nexport function pipeFromArray(fns: Array>): UnaryFunction {\n if (fns.length === 0) {\n return identity as UnaryFunction;\n }\n\n if (fns.length === 1) {\n return fns[0];\n }\n\n return function piped(input: T): R {\n return fns.reduce((prev: any, fn: UnaryFunction) => fn(prev), input as any);\n };\n}\n", "import { Operator } from './Operator';\nimport { SafeSubscriber, Subscriber } from './Subscriber';\nimport { isSubscription, Subscription } from './Subscription';\nimport { TeardownLogic, OperatorFunction, Subscribable, Observer } from './types';\nimport { observable as Symbol_observable } from './symbol/observable';\nimport { pipeFromArray } from './util/pipe';\nimport { config } from './config';\nimport { isFunction } from './util/isFunction';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A representation of any set of values over any amount of time. This is the most basic building block\n * of RxJS.\n *\n * @class Observable\n */\nexport class Observable implements Subscribable {\n /**\n * @deprecated Internal implementation detail, do not use directly. 
Will be made internal in v8.\n */\n source: Observable | undefined;\n\n /**\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n */\n operator: Operator | undefined;\n\n /**\n * @constructor\n * @param {Function} subscribe the function that is called when the Observable is\n * initially subscribed to. This function is given a Subscriber, to which new values\n * can be `next`ed, or an `error` method can be called to raise an error, or\n * `complete` can be called to notify of a successful completion.\n */\n constructor(subscribe?: (this: Observable, subscriber: Subscriber) => TeardownLogic) {\n if (subscribe) {\n this._subscribe = subscribe;\n }\n }\n\n // HACK: Since TypeScript inherits static properties too, we have to\n // fight against TypeScript here so Subject can have a different static create signature\n /**\n * Creates a new Observable by calling the Observable constructor\n * @owner Observable\n * @method create\n * @param {Function} subscribe? the subscriber function to be passed to the Observable constructor\n * @return {Observable} a new observable\n * @nocollapse\n * @deprecated Use `new Observable()` instead. Will be removed in v8.\n */\n static create: (...args: any[]) => any = (subscribe?: (subscriber: Subscriber) => TeardownLogic) => {\n return new Observable(subscribe);\n };\n\n /**\n * Creates a new Observable, with this Observable instance as the source, and the passed\n * operator defined as the new observable's operator.\n * @method lift\n * @param operator the operator defining the operation to take on the observable\n * @return a new observable with the Operator applied\n * @deprecated Internal implementation detail, do not use directly. Will be made internal in v8.\n * If you have implemented an operator using `lift`, it is recommended that you create an\n * operator by simply returning `new Observable()` directly. See \"Creating new operators from\n * scratch\" section here: https://rxjs.dev/guide/operators\n */\n lift(operator?: Operator): Observable {\n const observable = new Observable();\n observable.source = this;\n observable.operator = operator;\n return observable;\n }\n\n subscribe(observerOrNext?: Partial> | ((value: T) => void)): Subscription;\n /** @deprecated Instead of passing separate callback arguments, use an observer argument. Signatures taking separate callback arguments will be removed in v8. Details: https://rxjs.dev/deprecations/subscribe-arguments */\n subscribe(next?: ((value: T) => void) | null, error?: ((error: any) => void) | null, complete?: (() => void) | null): Subscription;\n /**\n * Invokes an execution of an Observable and registers Observer handlers for notifications it will emit.\n *\n * Use it when you have all these Observables, but still nothing is happening.\n *\n * `subscribe` is not a regular operator, but a method that calls Observable's internal `subscribe` function. It\n * might be for example a function that you passed to Observable's constructor, but most of the time it is\n * a library implementation, which defines what will be emitted by an Observable, and when it be will emitted. This means\n * that calling `subscribe` is actually the moment when Observable starts its work, not when it is created, as it is often\n * the thought.\n *\n * Apart from starting the execution of an Observable, this method allows you to listen for values\n * that an Observable emits, as well as for when it completes or errors. 
You can achieve this in two\n * of the following ways.\n *\n * The first way is creating an object that implements {@link Observer} interface. It should have methods\n * defined by that interface, but note that it should be just a regular JavaScript object, which you can create\n * yourself in any way you want (ES6 class, classic function constructor, object literal etc.). In particular, do\n * not attempt to use any RxJS implementation details to create Observers - you don't need them. Remember also\n * that your object does not have to implement all methods. If you find yourself creating a method that doesn't\n * do anything, you can simply omit it. Note however, if the `error` method is not provided and an error happens,\n * it will be thrown asynchronously. Errors thrown asynchronously cannot be caught using `try`/`catch`. Instead,\n * use the {@link onUnhandledError} configuration option or use a runtime handler (like `window.onerror` or\n * `process.on('error)`) to be notified of unhandled errors. Because of this, it's recommended that you provide\n * an `error` method to avoid missing thrown errors.\n *\n * The second way is to give up on Observer object altogether and simply provide callback functions in place of its methods.\n * This means you can provide three functions as arguments to `subscribe`, where the first function is equivalent\n * of a `next` method, the second of an `error` method and the third of a `complete` method. Just as in case of an Observer,\n * if you do not need to listen for something, you can omit a function by passing `undefined` or `null`,\n * since `subscribe` recognizes these functions by where they were placed in function call. When it comes\n * to the `error` function, as with an Observer, if not provided, errors emitted by an Observable will be thrown asynchronously.\n *\n * You can, however, subscribe with no parameters at all. This may be the case where you're not interested in terminal events\n * and you also handled emissions internally by using operators (e.g. using `tap`).\n *\n * Whichever style of calling `subscribe` you use, in both cases it returns a Subscription object.\n * This object allows you to call `unsubscribe` on it, which in turn will stop the work that an Observable does and will clean\n * up all resources that an Observable used. Note that cancelling a subscription will not call `complete` callback\n * provided to `subscribe` function, which is reserved for a regular completion signal that comes from an Observable.\n *\n * Remember that callbacks provided to `subscribe` are not guaranteed to be called asynchronously.\n * It is an Observable itself that decides when these functions will be called. For example {@link of}\n * by default emits all its values synchronously. 
Always check documentation for how given Observable\n * will behave when subscribed and if its default behavior can be modified with a `scheduler`.\n *\n * #### Examples\n *\n * Subscribe with an {@link guide/observer Observer}\n *\n * ```ts\n * import { of } from 'rxjs';\n *\n * const sumObserver = {\n * sum: 0,\n * next(value) {\n * console.log('Adding: ' + value);\n * this.sum = this.sum + value;\n * },\n * error() {\n * // We actually could just remove this method,\n * // since we do not really care about errors right now.\n * },\n * complete() {\n * console.log('Sum equals: ' + this.sum);\n * }\n * };\n *\n * of(1, 2, 3) // Synchronously emits 1, 2, 3 and then completes.\n * .subscribe(sumObserver);\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Subscribe with functions ({@link deprecations/subscribe-arguments deprecated})\n *\n * ```ts\n * import { of } from 'rxjs'\n *\n * let sum = 0;\n *\n * of(1, 2, 3).subscribe(\n * value => {\n * console.log('Adding: ' + value);\n * sum = sum + value;\n * },\n * undefined,\n * () => console.log('Sum equals: ' + sum)\n * );\n *\n * // Logs:\n * // 'Adding: 1'\n * // 'Adding: 2'\n * // 'Adding: 3'\n * // 'Sum equals: 6'\n * ```\n *\n * Cancel a subscription\n *\n * ```ts\n * import { interval } from 'rxjs';\n *\n * const subscription = interval(1000).subscribe({\n * next(num) {\n * console.log(num)\n * },\n * complete() {\n * // Will not be called, even when cancelling subscription.\n * console.log('completed!');\n * }\n * });\n *\n * setTimeout(() => {\n * subscription.unsubscribe();\n * console.log('unsubscribed!');\n * }, 2500);\n *\n * // Logs:\n * // 0 after 1s\n * // 1 after 2s\n * // 'unsubscribed!' after 2.5s\n * ```\n *\n * @param {Observer|Function} observerOrNext (optional) Either an observer with methods to be called,\n * or the first of three possible handlers, which is the handler for each value emitted from the subscribed\n * Observable.\n * @param {Function} error (optional) A handler for a terminal event resulting from an error. If no error handler is provided,\n * the error will be thrown asynchronously as unhandled.\n * @param {Function} complete (optional) A handler for a terminal event resulting from successful completion.\n * @return {Subscription} a subscription reference to the registered handlers\n * @method subscribe\n */\n subscribe(\n observerOrNext?: Partial> | ((value: T) => void) | null,\n error?: ((error: any) => void) | null,\n complete?: (() => void) | null\n ): Subscription {\n const subscriber = isSubscriber(observerOrNext) ? observerOrNext : new SafeSubscriber(observerOrNext, error, complete);\n\n errorContext(() => {\n const { operator, source } = this;\n subscriber.add(\n operator\n ? // We're dealing with a subscription in the\n // operator chain to one of our lifted operators.\n operator.call(subscriber, source)\n : source\n ? // If `source` has a value, but `operator` does not, something that\n // had intimate knowledge of our API, like our `Subject`, must have\n // set it. 
We're going to just call `_subscribe` directly.\n this._subscribe(subscriber)\n : // In all other cases, we're likely wrapping a user-provided initializer\n // function, so we need to catch errors and handle them appropriately.\n this._trySubscribe(subscriber)\n );\n });\n\n return subscriber;\n }\n\n /** @internal */\n protected _trySubscribe(sink: Subscriber): TeardownLogic {\n try {\n return this._subscribe(sink);\n } catch (err) {\n // We don't need to return anything in this case,\n // because it's just going to try to `add()` to a subscription\n // above.\n sink.error(err);\n }\n }\n\n /**\n * Used as a NON-CANCELLABLE means of subscribing to an observable, for use with\n * APIs that expect promises, like `async/await`. You cannot unsubscribe from this.\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * #### Example\n *\n * ```ts\n * import { interval, take } from 'rxjs';\n *\n * const source$ = interval(1000).pipe(take(4));\n *\n * async function getTotal() {\n * let total = 0;\n *\n * await source$.forEach(value => {\n * total += value;\n * console.log('observable -> ' + value);\n * });\n *\n * return total;\n * }\n *\n * getTotal().then(\n * total => console.log('Total: ' + total)\n * );\n *\n * // Expected:\n * // 'observable -> 0'\n * // 'observable -> 1'\n * // 'observable -> 2'\n * // 'observable -> 3'\n * // 'Total: 6'\n * ```\n *\n * @param next a handler for each value emitted by the observable\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n */\n forEach(next: (value: T) => void): Promise;\n\n /**\n * @param next a handler for each value emitted by the observable\n * @param promiseCtor a constructor function used to instantiate the Promise\n * @return a promise that either resolves on observable completion or\n * rejects with the handled error\n * @deprecated Passing a Promise constructor will no longer be available\n * in upcoming versions of RxJS. This is because it adds weight to the library, for very\n * little benefit. If you need this functionality, it is recommended that you either\n * polyfill Promise, or you create an adapter to convert the returned native promise\n * to whatever promise implementation you wanted. 
Will be removed in v8.\n */\n forEach(next: (value: T) => void, promiseCtor: PromiseConstructorLike): Promise;\n\n forEach(next: (value: T) => void, promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n const subscriber = new SafeSubscriber({\n next: (value) => {\n try {\n next(value);\n } catch (err) {\n reject(err);\n subscriber.unsubscribe();\n }\n },\n error: reject,\n complete: resolve,\n });\n this.subscribe(subscriber);\n }) as Promise;\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): TeardownLogic {\n return this.source?.subscribe(subscriber);\n }\n\n /**\n * An interop point defined by the es7-observable spec https://github.com/zenparsing/es-observable\n * @method Symbol.observable\n * @return {Observable} this instance of the observable\n */\n [Symbol_observable]() {\n return this;\n }\n\n /* tslint:disable:max-line-length */\n pipe(): Observable;\n pipe(op1: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction): Observable;\n pipe(op1: OperatorFunction, op2: OperatorFunction, op3: OperatorFunction): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction\n ): Observable;\n pipe(\n op1: OperatorFunction,\n op2: OperatorFunction,\n op3: OperatorFunction,\n op4: OperatorFunction,\n op5: OperatorFunction,\n op6: OperatorFunction,\n op7: OperatorFunction,\n op8: OperatorFunction,\n op9: OperatorFunction,\n ...operations: OperatorFunction[]\n ): Observable;\n /* tslint:enable:max-line-length */\n\n /**\n * Used to stitch together functional operators into a chain.\n * @method pipe\n * @return {Observable} the Observable result of all of the operators having\n * been called in the order they were passed in.\n *\n * ## Example\n *\n * ```ts\n * import { interval, filter, map, scan } from 'rxjs';\n *\n * interval(1000)\n * .pipe(\n * filter(x => x % 2 === 0),\n * map(x => x + x),\n * scan((acc, x) => acc + x)\n * )\n * .subscribe(x => console.log(x));\n * ```\n */\n pipe(...operations: OperatorFunction[]): Observable {\n return pipeFromArray(operations)(this);\n }\n\n /* tslint:disable:max-line-length */\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. 
Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: typeof Promise): Promise;\n /** @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise */\n toPromise(PromiseCtor: PromiseConstructorLike): Promise;\n /* tslint:enable:max-line-length */\n\n /**\n * Subscribe to this Observable and get a Promise resolving on\n * `complete` with the last emission (if any).\n *\n * **WARNING**: Only use this with observables you *know* will complete. If the source\n * observable does not complete, you will end up with a promise that is hung up, and\n * potentially all of the state of an async function hanging out in memory. To avoid\n * this situation, look into adding something like {@link timeout}, {@link take},\n * {@link takeWhile}, or {@link takeUntil} amongst others.\n *\n * @method toPromise\n * @param [promiseCtor] a constructor function used to instantiate\n * the Promise\n * @return A Promise that resolves with the last value emit, or\n * rejects on an error. If there were no emissions, Promise\n * resolves with undefined.\n * @deprecated Replaced with {@link firstValueFrom} and {@link lastValueFrom}. Will be removed in v8. Details: https://rxjs.dev/deprecations/to-promise\n */\n toPromise(promiseCtor?: PromiseConstructorLike): Promise {\n promiseCtor = getPromiseCtor(promiseCtor);\n\n return new promiseCtor((resolve, reject) => {\n let value: T | undefined;\n this.subscribe(\n (x: T) => (value = x),\n (err: any) => reject(err),\n () => resolve(value)\n );\n }) as Promise;\n }\n}\n\n/**\n * Decides between a passed promise constructor from consuming code,\n * A default configured promise constructor, and the native promise\n * constructor and returns it. If nothing can be found, it will throw\n * an error.\n * @param promiseCtor The optional promise constructor to passed by consuming code\n */\nfunction getPromiseCtor(promiseCtor: PromiseConstructorLike | undefined) {\n return promiseCtor ?? config.Promise ?? Promise;\n}\n\nfunction isObserver(value: any): value is Observer {\n return value && isFunction(value.next) && isFunction(value.error) && isFunction(value.complete);\n}\n\nfunction isSubscriber(value: any): value is Subscriber {\n return (value && value instanceof Subscriber) || (isObserver(value) && isSubscription(value));\n}\n", "import { Observable } from '../Observable';\nimport { Subscriber } from '../Subscriber';\nimport { OperatorFunction } from '../types';\nimport { isFunction } from './isFunction';\n\n/**\n * Used to determine if an object is an Observable with a lift function.\n */\nexport function hasLift(source: any): source is { lift: InstanceType['lift'] } {\n return isFunction(source?.lift);\n}\n\n/**\n * Creates an `OperatorFunction`. 
Used to define operators throughout the library in a concise way.\n * @param init The logic to connect the liftedSource to the subscriber at the moment of subscription.\n */\nexport function operate(\n init: (liftedSource: Observable, subscriber: Subscriber) => (() => void) | void\n): OperatorFunction {\n return (source: Observable) => {\n if (hasLift(source)) {\n return source.lift(function (this: Subscriber, liftedSource: Observable) {\n try {\n return init(liftedSource, this);\n } catch (err) {\n this.error(err);\n }\n });\n }\n throw new TypeError('Unable to lift unknown Observable type');\n };\n}\n", "import { Subscriber } from '../Subscriber';\n\n/**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional teardown logic here. This will only be called on teardown if the\n * subscriber itself is not already closed. This is called after all other teardown logic is executed.\n */\nexport function createOperatorSubscriber(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n onFinalize?: () => void\n): Subscriber {\n return new OperatorSubscriber(destination, onNext, onComplete, onError, onFinalize);\n}\n\n/**\n * A generic helper for allowing operators to be created with a Subscriber and\n * use closures to capture necessary state from the operator function itself.\n */\nexport class OperatorSubscriber extends Subscriber {\n /**\n * Creates an instance of an `OperatorSubscriber`.\n * @param destination The downstream subscriber.\n * @param onNext Handles next values, only called if this subscriber is not stopped or closed. Any\n * error that occurs in this function is caught and sent to the `error` method of this subscriber.\n * @param onError Handles errors from the subscription, any errors that occur in this handler are caught\n * and send to the `destination` error handler.\n * @param onComplete Handles completion notification from the subscription. Any errors that occur in\n * this handler are sent to the `destination` error handler.\n * @param onFinalize Additional finalization logic here. This will only be called on finalization if the\n * subscriber itself is not already closed. This is called after all other finalization logic is executed.\n * @param shouldUnsubscribe An optional check to see if an unsubscribe call should truly unsubscribe.\n * NOTE: This currently **ONLY** exists to support the strange behavior of {@link groupBy}, where unsubscription\n * to the resulting observable does not actually disconnect from the source if there are active subscriptions\n * to any grouped observable. 
(DO NOT EXPOSE OR USE EXTERNALLY!!!)\n */\n constructor(\n destination: Subscriber,\n onNext?: (value: T) => void,\n onComplete?: () => void,\n onError?: (err: any) => void,\n private onFinalize?: () => void,\n private shouldUnsubscribe?: () => boolean\n ) {\n // It's important - for performance reasons - that all of this class's\n // members are initialized and that they are always initialized in the same\n // order. This will ensure that all OperatorSubscriber instances have the\n // same hidden class in V8. This, in turn, will help keep the number of\n // hidden classes involved in property accesses within the base class as\n // low as possible. If the number of hidden classes involved exceeds four,\n // the property accesses will become megamorphic and performance penalties\n // will be incurred - i.e. inline caches won't be used.\n //\n // The reasons for ensuring all instances have the same hidden class are\n // further discussed in this blog post from Benedikt Meurer:\n // https://benediktmeurer.de/2018/03/23/impact-of-polymorphism-on-component-based-frameworks-like-react/\n super(destination);\n this._next = onNext\n ? function (this: OperatorSubscriber, value: T) {\n try {\n onNext(value);\n } catch (err) {\n destination.error(err);\n }\n }\n : super._next;\n this._error = onError\n ? function (this: OperatorSubscriber, err: any) {\n try {\n onError(err);\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._error;\n this._complete = onComplete\n ? function (this: OperatorSubscriber) {\n try {\n onComplete();\n } catch (err) {\n // Send any errors that occur down stream.\n destination.error(err);\n } finally {\n // Ensure finalization.\n this.unsubscribe();\n }\n }\n : super._complete;\n }\n\n unsubscribe() {\n if (!this.shouldUnsubscribe || this.shouldUnsubscribe()) {\n const { closed } = this;\n super.unsubscribe();\n // Execute additional teardown if we have any and we didn't already do so.\n !closed && this.onFinalize?.();\n }\n }\n}\n", "import { Subscription } from '../Subscription';\n\ninterface AnimationFrameProvider {\n schedule(callback: FrameRequestCallback): Subscription;\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n delegate:\n | {\n requestAnimationFrame: typeof requestAnimationFrame;\n cancelAnimationFrame: typeof cancelAnimationFrame;\n }\n | undefined;\n}\n\nexport const animationFrameProvider: AnimationFrameProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n schedule(callback) {\n let request = requestAnimationFrame;\n let cancel: typeof cancelAnimationFrame | undefined = cancelAnimationFrame;\n const { delegate } = animationFrameProvider;\n if (delegate) {\n request = delegate.requestAnimationFrame;\n cancel = delegate.cancelAnimationFrame;\n }\n const handle = request((timestamp) => {\n // Clear the cancel function. 
The request has been fulfilled, so\n // attempting to cancel the request upon unsubscription would be\n // pointless.\n cancel = undefined;\n callback(timestamp);\n });\n return new Subscription(() => cancel?.(handle));\n },\n requestAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.requestAnimationFrame || requestAnimationFrame)(...args);\n },\n cancelAnimationFrame(...args) {\n const { delegate } = animationFrameProvider;\n return (delegate?.cancelAnimationFrame || cancelAnimationFrame)(...args);\n },\n delegate: undefined,\n};\n", "import { createErrorClass } from './createErrorClass';\n\nexport interface ObjectUnsubscribedError extends Error {}\n\nexport interface ObjectUnsubscribedErrorCtor {\n /**\n * @deprecated Internal implementation detail. Do not construct error instances.\n * Cannot be tagged as internal: https://github.com/ReactiveX/rxjs/issues/6269\n */\n new (): ObjectUnsubscribedError;\n}\n\n/**\n * An error thrown when an action is invalid because the object has been\n * unsubscribed.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n *\n * @class ObjectUnsubscribedError\n */\nexport const ObjectUnsubscribedError: ObjectUnsubscribedErrorCtor = createErrorClass(\n (_super) =>\n function ObjectUnsubscribedErrorImpl(this: any) {\n _super(this);\n this.name = 'ObjectUnsubscribedError';\n this.message = 'object unsubscribed';\n }\n);\n", "import { Operator } from './Operator';\nimport { Observable } from './Observable';\nimport { Subscriber } from './Subscriber';\nimport { Subscription, EMPTY_SUBSCRIPTION } from './Subscription';\nimport { Observer, SubscriptionLike, TeardownLogic } from './types';\nimport { ObjectUnsubscribedError } from './util/ObjectUnsubscribedError';\nimport { arrRemove } from './util/arrRemove';\nimport { errorContext } from './util/errorContext';\n\n/**\n * A Subject is a special type of Observable that allows values to be\n * multicasted to many Observers. Subjects are like EventEmitters.\n *\n * Every Subject is an Observable and an Observer. You can subscribe to a\n * Subject, and you can call next to feed values as well as error and complete.\n */\nexport class Subject extends Observable implements SubscriptionLike {\n closed = false;\n\n private currentObservers: Observer[] | null = null;\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n observers: Observer[] = [];\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n isStopped = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n hasError = false;\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n thrownError: any = null;\n\n /**\n * Creates a \"subject\" by basically gluing an observer to an observable.\n *\n * @nocollapse\n * @deprecated Recommended you do not use. Will be removed at some point in the future. Plans for replacement still under discussion.\n */\n static create: (...args: any[]) => any = (destination: Observer, source: Observable): AnonymousSubject => {\n return new AnonymousSubject(destination, source);\n };\n\n constructor() {\n // NOTE: This must be here to obscure Observable's constructor.\n super();\n }\n\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. 
*/\n lift(operator: Operator): Observable {\n const subject = new AnonymousSubject(this, this);\n subject.operator = operator as any;\n return subject as any;\n }\n\n /** @internal */\n protected _throwIfClosed() {\n if (this.closed) {\n throw new ObjectUnsubscribedError();\n }\n }\n\n next(value: T) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n if (!this.currentObservers) {\n this.currentObservers = Array.from(this.observers);\n }\n for (const observer of this.currentObservers) {\n observer.next(value);\n }\n }\n });\n }\n\n error(err: any) {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.hasError = this.isStopped = true;\n this.thrownError = err;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.error(err);\n }\n }\n });\n }\n\n complete() {\n errorContext(() => {\n this._throwIfClosed();\n if (!this.isStopped) {\n this.isStopped = true;\n const { observers } = this;\n while (observers.length) {\n observers.shift()!.complete();\n }\n }\n });\n }\n\n unsubscribe() {\n this.isStopped = this.closed = true;\n this.observers = this.currentObservers = null!;\n }\n\n get observed() {\n return this.observers?.length > 0;\n }\n\n /** @internal */\n protected _trySubscribe(subscriber: Subscriber): TeardownLogic {\n this._throwIfClosed();\n return super._trySubscribe(subscriber);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._checkFinalizedStatuses(subscriber);\n return this._innerSubscribe(subscriber);\n }\n\n /** @internal */\n protected _innerSubscribe(subscriber: Subscriber) {\n const { hasError, isStopped, observers } = this;\n if (hasError || isStopped) {\n return EMPTY_SUBSCRIPTION;\n }\n this.currentObservers = null;\n observers.push(subscriber);\n return new Subscription(() => {\n this.currentObservers = null;\n arrRemove(observers, subscriber);\n });\n }\n\n /** @internal */\n protected _checkFinalizedStatuses(subscriber: Subscriber) {\n const { hasError, thrownError, isStopped } = this;\n if (hasError) {\n subscriber.error(thrownError);\n } else if (isStopped) {\n subscriber.complete();\n }\n }\n\n /**\n * Creates a new Observable with this Subject as the source. You can do this\n * to create custom Observer-side logic of the Subject and conceal it from\n * code that uses the Observable.\n * @return {Observable} Observable that the Subject casts to\n */\n asObservable(): Observable {\n const observable: any = new Observable();\n observable.source = this;\n return observable;\n }\n}\n\n/**\n * @class AnonymousSubject\n */\nexport class AnonymousSubject extends Subject {\n constructor(\n /** @deprecated Internal implementation detail, do not use directly. Will be made internal in v8. */\n public destination?: Observer,\n source?: Observable\n ) {\n super();\n this.source = source;\n }\n\n next(value: T) {\n this.destination?.next?.(value);\n }\n\n error(err: any) {\n this.destination?.error?.(err);\n }\n\n complete() {\n this.destination?.complete?.();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n return this.source?.subscribe(subscriber) ?? 
EMPTY_SUBSCRIPTION;\n }\n}\n", "import { Subject } from './Subject';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\n\n/**\n * A variant of Subject that requires an initial value and emits its current\n * value whenever it is subscribed to.\n *\n * @class BehaviorSubject\n */\nexport class BehaviorSubject extends Subject {\n constructor(private _value: T) {\n super();\n }\n\n get value(): T {\n return this.getValue();\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n const subscription = super._subscribe(subscriber);\n !subscription.closed && subscriber.next(this._value);\n return subscription;\n }\n\n getValue(): T {\n const { hasError, thrownError, _value } = this;\n if (hasError) {\n throw thrownError;\n }\n this._throwIfClosed();\n return _value;\n }\n\n next(value: T): void {\n super.next((this._value = value));\n }\n}\n", "import { TimestampProvider } from '../types';\n\ninterface DateTimestampProvider extends TimestampProvider {\n delegate: TimestampProvider | undefined;\n}\n\nexport const dateTimestampProvider: DateTimestampProvider = {\n now() {\n // Use the variable rather than `this` so that the function can be called\n // without being bound to the provider.\n return (dateTimestampProvider.delegate || Date).now();\n },\n delegate: undefined,\n};\n", "import { Subject } from './Subject';\nimport { TimestampProvider } from './types';\nimport { Subscriber } from './Subscriber';\nimport { Subscription } from './Subscription';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * A variant of {@link Subject} that \"replays\" old values to new subscribers by emitting them when they first subscribe.\n *\n * `ReplaySubject` has an internal buffer that will store a specified number of values that it has observed. Like `Subject`,\n * `ReplaySubject` \"observes\" values by having them passed to its `next` method. When it observes a value, it will store that\n * value for a time determined by the configuration of the `ReplaySubject`, as passed to its constructor.\n *\n * When a new subscriber subscribes to the `ReplaySubject` instance, it will synchronously emit all values in its buffer in\n * a First-In-First-Out (FIFO) manner. The `ReplaySubject` will also complete, if it has observed completion; and it will\n * error if it has observed an error.\n *\n * There are two main configuration items to be concerned with:\n *\n * 1. `bufferSize` - This will determine how many items are stored in the buffer, defaults to infinite.\n * 2. `windowTime` - The amount of time to hold a value in the buffer before removing it from the buffer.\n *\n * Both configurations may exist simultaneously. So if you would like to buffer a maximum of 3 values, as long as the values\n * are less than 2 seconds old, you could do so with a `new ReplaySubject(3, 2000)`.\n *\n * ### Differences with BehaviorSubject\n *\n * `BehaviorSubject` is similar to `new ReplaySubject(1)`, with a couple of exceptions:\n *\n * 1. `BehaviorSubject` comes \"primed\" with a single value upon construction.\n * 2. 
`ReplaySubject` will replay values, even after observing an error, where `BehaviorSubject` will not.\n *\n * @see {@link Subject}\n * @see {@link BehaviorSubject}\n * @see {@link shareReplay}\n */\nexport class ReplaySubject extends Subject {\n private _buffer: (T | number)[] = [];\n private _infiniteTimeWindow = true;\n\n /**\n * @param bufferSize The size of the buffer to replay on subscription\n * @param windowTime The amount of time the buffered items will stay buffered\n * @param timestampProvider An object with a `now()` method that provides the current timestamp. This is used to\n * calculate the amount of time something has been buffered.\n */\n constructor(\n private _bufferSize = Infinity,\n private _windowTime = Infinity,\n private _timestampProvider: TimestampProvider = dateTimestampProvider\n ) {\n super();\n this._infiniteTimeWindow = _windowTime === Infinity;\n this._bufferSize = Math.max(1, _bufferSize);\n this._windowTime = Math.max(1, _windowTime);\n }\n\n next(value: T): void {\n const { isStopped, _buffer, _infiniteTimeWindow, _timestampProvider, _windowTime } = this;\n if (!isStopped) {\n _buffer.push(value);\n !_infiniteTimeWindow && _buffer.push(_timestampProvider.now() + _windowTime);\n }\n this._trimBuffer();\n super.next(value);\n }\n\n /** @internal */\n protected _subscribe(subscriber: Subscriber): Subscription {\n this._throwIfClosed();\n this._trimBuffer();\n\n const subscription = this._innerSubscribe(subscriber);\n\n const { _infiniteTimeWindow, _buffer } = this;\n // We use a copy here, so reentrant code does not mutate our array while we're\n // emitting it to a new subscriber.\n const copy = _buffer.slice();\n for (let i = 0; i < copy.length && !subscriber.closed; i += _infiniteTimeWindow ? 1 : 2) {\n subscriber.next(copy[i] as T);\n }\n\n this._checkFinalizedStatuses(subscriber);\n\n return subscription;\n }\n\n private _trimBuffer() {\n const { _bufferSize, _timestampProvider, _buffer, _infiniteTimeWindow } = this;\n // If we don't have an infinite buffer size, and we're over the length,\n // use splice to truncate the old buffer values off. Note that we have to\n // double the size for instances where we're not using an infinite time window\n // because we're storing the values and the timestamps in the same array.\n const adjustedBufferSize = (_infiniteTimeWindow ? 1 : 2) * _bufferSize;\n _bufferSize < Infinity && adjustedBufferSize < _buffer.length && _buffer.splice(0, _buffer.length - adjustedBufferSize);\n\n // Now, if we're not in an infinite time window, remove all values where the time is\n // older than what is allowed.\n if (!_infiniteTimeWindow) {\n const now = _timestampProvider.now();\n let last = 0;\n // Search the array for the first timestamp that isn't expired and\n // truncate the buffer up to that point.\n for (let i = 1; i < _buffer.length && (_buffer[i] as number) <= now; i += 2) {\n last = i;\n }\n last && _buffer.splice(0, last + 1);\n }\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Subscription } from '../Subscription';\nimport { SchedulerAction } from '../types';\n\n/**\n * A unit of work to be executed in a `scheduler`. 
An action is typically\n * created from within a {@link SchedulerLike} and an RxJS user does not need to concern\n * themselves about creating and manipulating an Action.\n *\n * ```ts\n * class Action extends Subscription {\n * new (scheduler: Scheduler, work: (state?: T) => void);\n * schedule(state?: T, delay: number = 0): Subscription;\n * }\n * ```\n *\n * @class Action\n */\nexport class Action extends Subscription {\n constructor(scheduler: Scheduler, work: (this: SchedulerAction, state?: T) => void) {\n super();\n }\n /**\n * Schedules this action on its parent {@link SchedulerLike} for execution. May be passed\n * some context object, `state`. May happen at some point in the future,\n * according to the `delay` parameter, if specified.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler.\n * @return {void}\n */\n public schedule(state?: T, delay: number = 0): Subscription {\n return this;\n }\n}\n", "import type { TimerHandle } from './timerHandle';\ntype SetIntervalFunction = (handler: () => void, timeout?: number, ...args: any[]) => TimerHandle;\ntype ClearIntervalFunction = (handle: TimerHandle) => void;\n\ninterface IntervalProvider {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n delegate:\n | {\n setInterval: SetIntervalFunction;\n clearInterval: ClearIntervalFunction;\n }\n | undefined;\n}\n\nexport const intervalProvider: IntervalProvider = {\n // When accessing the delegate, use the variable rather than `this` so that\n // the functions can be called without being bound to the provider.\n setInterval(handler: () => void, timeout?: number, ...args) {\n const { delegate } = intervalProvider;\n if (delegate?.setInterval) {\n return delegate.setInterval(handler, timeout, ...args);\n }\n return setInterval(handler, timeout, ...args);\n },\n clearInterval(handle) {\n const { delegate } = intervalProvider;\n return (delegate?.clearInterval || clearInterval)(handle as any);\n },\n delegate: undefined,\n};\n", "import { Action } from './Action';\nimport { SchedulerAction } from '../types';\nimport { Subscription } from '../Subscription';\nimport { AsyncScheduler } from './AsyncScheduler';\nimport { intervalProvider } from './intervalProvider';\nimport { arrRemove } from '../util/arrRemove';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncAction extends Action {\n public id: TimerHandle | undefined;\n public state?: T;\n // @ts-ignore: Property has no initializer and is not definitely assigned\n public delay: number;\n protected pending: boolean = false;\n\n constructor(protected scheduler: AsyncScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (this.closed) {\n return this;\n }\n\n // Always replace the current state with the new state.\n this.state = state;\n\n const id = this.id;\n const scheduler = this.scheduler;\n\n //\n // Important implementation note:\n //\n // Actions only execute once by default, unless rescheduled from within the\n // scheduled callback. 
This allows us to implement single and repeat\n // actions via the same code path, without adding API surface area, as well\n // as mimic traditional recursion but across asynchronous boundaries.\n //\n // However, JS runtimes and timers distinguish between intervals achieved by\n // serial `setTimeout` calls vs. a single `setInterval` call. An interval of\n // serial `setTimeout` calls can be individually delayed, which delays\n // scheduling the next `setTimeout`, and so on. `setInterval` attempts to\n // guarantee the interval callback will be invoked more precisely to the\n // interval period, regardless of load.\n //\n // Therefore, we use `setInterval` to schedule single and repeat actions.\n // If the action reschedules itself with the same delay, the interval is not\n // canceled. If the action doesn't reschedule, or reschedules with a\n // different delay, the interval will be canceled after scheduled callback\n // execution.\n //\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, delay);\n }\n\n // Set the pending flag indicating that this action has been scheduled, or\n // has recursively rescheduled itself.\n this.pending = true;\n\n this.delay = delay;\n // If this action has already an async Id, don't request a new one.\n this.id = this.id ?? this.requestAsyncId(scheduler, this.id, delay);\n\n return this;\n }\n\n protected requestAsyncId(scheduler: AsyncScheduler, _id?: TimerHandle, delay: number = 0): TimerHandle {\n return intervalProvider.setInterval(scheduler.flush.bind(scheduler, this), delay);\n }\n\n protected recycleAsyncId(_scheduler: AsyncScheduler, id?: TimerHandle, delay: number | null = 0): TimerHandle | undefined {\n // If this action is rescheduled with the same delay time, don't clear the interval id.\n if (delay != null && this.delay === delay && this.pending === false) {\n return id;\n }\n // Otherwise, if the action's delay time is different from the current delay,\n // or the action has been rescheduled before it's executed, clear the interval id\n if (id != null) {\n intervalProvider.clearInterval(id);\n }\n\n return undefined;\n }\n\n /**\n * Immediately executes this action and the `work` it contains.\n * @return {any}\n */\n public execute(state: T, delay: number): any {\n if (this.closed) {\n return new Error('executing a cancelled action');\n }\n\n this.pending = false;\n const error = this._execute(state, delay);\n if (error) {\n return error;\n } else if (this.pending === false && this.id != null) {\n // Dequeue if the action didn't reschedule itself. Don't call\n // unsubscribe(), because the action could reschedule later.\n // For example:\n // ```\n // scheduler.schedule(function doWork(counter) {\n // /* ... I'm a busy worker bee ... */\n // var originalAction = this;\n // /* wait 100ms before rescheduling the action */\n // setTimeout(function () {\n // originalAction.schedule(counter + 1);\n // }, 100);\n // }, 1000);\n // ```\n this.id = this.recycleAsyncId(this.scheduler, this.id, null);\n }\n }\n\n protected _execute(state: T, _delay: number): any {\n let errored: boolean = false;\n let errorValue: any;\n try {\n this.work(state);\n } catch (e) {\n errored = true;\n // HACK: Since code elsewhere is relying on the \"truthiness\" of the\n // return here, we can't have it return \"\" or 0 or false.\n // TODO: Clean this up when we refactor schedulers mid-version-8 or so.\n errorValue = e ? 
e : new Error('Scheduled action threw falsy error');\n }\n if (errored) {\n this.unsubscribe();\n return errorValue;\n }\n }\n\n unsubscribe() {\n if (!this.closed) {\n const { id, scheduler } = this;\n const { actions } = scheduler;\n\n this.work = this.state = this.scheduler = null!;\n this.pending = false;\n\n arrRemove(actions, this);\n if (id != null) {\n this.id = this.recycleAsyncId(scheduler, id, null);\n }\n\n this.delay = null!;\n super.unsubscribe();\n }\n }\n}\n", "import { Action } from './scheduler/Action';\nimport { Subscription } from './Subscription';\nimport { SchedulerLike, SchedulerAction } from './types';\nimport { dateTimestampProvider } from './scheduler/dateTimestampProvider';\n\n/**\n * An execution context and a data structure to order tasks and schedule their\n * execution. Provides a notion of (potentially virtual) time, through the\n * `now()` getter method.\n *\n * Each unit of work in a Scheduler is called an `Action`.\n *\n * ```ts\n * class Scheduler {\n * now(): number;\n * schedule(work, delay?, state?): Subscription;\n * }\n * ```\n *\n * @class Scheduler\n * @deprecated Scheduler is an internal implementation detail of RxJS, and\n * should not be used directly. Rather, create your own class and implement\n * {@link SchedulerLike}. Will be made internal in v8.\n */\nexport class Scheduler implements SchedulerLike {\n public static now: () => number = dateTimestampProvider.now;\n\n constructor(private schedulerActionCtor: typeof Action, now: () => number = Scheduler.now) {\n this.now = now;\n }\n\n /**\n * A getter method that returns a number representing the current time\n * (at the time this function was called) according to the scheduler's own\n * internal clock.\n * @return {number} A number that represents the current time. May or may not\n * have a relation to wall-clock time. May or may not refer to a time unit\n * (e.g. milliseconds).\n */\n public now: () => number;\n\n /**\n * Schedules a function, `work`, for execution. May happen at some point in\n * the future, according to the `delay` parameter, if specified. 
May be passed\n * some context object, `state`, which will be passed to the `work` function.\n *\n * The given arguments will be processed an stored as an Action object in a\n * queue of actions.\n *\n * @param {function(state: ?T): ?Subscription} work A function representing a\n * task, or some unit of work to be executed by the Scheduler.\n * @param {number} [delay] Time to wait before executing the work, where the\n * time unit is implicit and defined by the Scheduler itself.\n * @param {T} [state] Some contextual data that the `work` function uses when\n * called by the Scheduler.\n * @return {Subscription} A subscription in order to be able to unsubscribe\n * the scheduled work.\n */\n public schedule(work: (this: SchedulerAction, state?: T) => void, delay: number = 0, state?: T): Subscription {\n return new this.schedulerActionCtor(this, work).schedule(state, delay);\n }\n}\n", "import { Scheduler } from '../Scheduler';\nimport { Action } from './Action';\nimport { AsyncAction } from './AsyncAction';\nimport { TimerHandle } from './timerHandle';\n\nexport class AsyncScheduler extends Scheduler {\n public actions: Array> = [];\n /**\n * A flag to indicate whether the Scheduler is currently executing a batch of\n * queued actions.\n * @type {boolean}\n * @internal\n */\n public _active: boolean = false;\n /**\n * An internal ID used to track the latest asynchronous task such as those\n * coming from `setTimeout`, `setInterval`, `requestAnimationFrame`, and\n * others.\n * @type {any}\n * @internal\n */\n public _scheduled: TimerHandle | undefined;\n\n constructor(SchedulerAction: typeof Action, now: () => number = Scheduler.now) {\n super(SchedulerAction, now);\n }\n\n public flush(action: AsyncAction): void {\n const { actions } = this;\n\n if (this._active) {\n actions.push(action);\n return;\n }\n\n let error: any;\n this._active = true;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions.shift()!)); // exhaust the scheduler queue\n\n this._active = false;\n\n if (error) {\n while ((action = actions.shift()!)) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\n/**\n *\n * Async Scheduler\n *\n * Schedule task as if you used setTimeout(task, duration)\n *\n * `async` scheduler schedules tasks asynchronously, by putting them on the JavaScript\n * event loop queue. 
It is best used to delay tasks in time or to schedule tasks repeating\n * in intervals.\n *\n * If you just want to \"defer\" task, that is to perform it right after currently\n * executing synchronous code ends (commonly achieved by `setTimeout(deferredTask, 0)`),\n * better choice will be the {@link asapScheduler} scheduler.\n *\n * ## Examples\n * Use async scheduler to delay task\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * const task = () => console.log('it works!');\n *\n * asyncScheduler.schedule(task, 2000);\n *\n * // After 2 seconds logs:\n * // \"it works!\"\n * ```\n *\n * Use async scheduler to repeat task in intervals\n * ```ts\n * import { asyncScheduler } from 'rxjs';\n *\n * function task(state) {\n * console.log(state);\n * this.schedule(state + 1, 1000); // `this` references currently executing Action,\n * // which we reschedule with new state and delay\n * }\n *\n * asyncScheduler.schedule(task, 3000, 0);\n *\n * // Logs:\n * // 0 after 3s\n * // 1 after 4s\n * // 2 after 5s\n * // 3 after 6s\n * ```\n */\n\nexport const asyncScheduler = new AsyncScheduler(AsyncAction);\n\n/**\n * @deprecated Renamed to {@link asyncScheduler}. Will be removed in v8.\n */\nexport const async = asyncScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { Subscription } from '../Subscription';\nimport { QueueScheduler } from './QueueScheduler';\nimport { SchedulerAction } from '../types';\nimport { TimerHandle } from './timerHandle';\n\nexport class QueueAction extends AsyncAction {\n constructor(protected scheduler: QueueScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n public schedule(state?: T, delay: number = 0): Subscription {\n if (delay > 0) {\n return super.schedule(state, delay);\n }\n this.delay = delay;\n this.state = state;\n this.scheduler.flush(this);\n return this;\n }\n\n public execute(state: T, delay: number): any {\n return delay > 0 || this.closed ? super.execute(state, delay) : this._execute(state, delay);\n }\n\n protected requestAsyncId(scheduler: QueueScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n\n if ((delay != null && delay > 0) || (delay == null && this.delay > 0)) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n\n // Otherwise flush the scheduler starting with this action.\n scheduler.flush(this);\n\n // HACK: In the past, this was returning `void`. However, `void` isn't a valid\n // `TimerHandle`, and generally the return value here isn't really used. So the\n // compromise is to return `0` which is both \"falsy\" and a valid `TimerHandle`,\n // as opposed to refactoring every other instanceo of `requestAsyncId`.\n return 0;\n }\n}\n", "import { AsyncScheduler } from './AsyncScheduler';\n\nexport class QueueScheduler extends AsyncScheduler {\n}\n", "import { QueueAction } from './QueueAction';\nimport { QueueScheduler } from './QueueScheduler';\n\n/**\n *\n * Queue Scheduler\n *\n * Put every next task on a queue, instead of executing it immediately\n *\n * `queue` scheduler, when used with delay, behaves the same as {@link asyncScheduler} scheduler.\n *\n * When used without delay, it schedules given task synchronously - executes it right when\n * it is scheduled. 
However when called recursively, that is when inside the scheduled task,\n * another task is scheduled with queue scheduler, instead of executing immediately as well,\n * that task will be put on a queue and wait for current one to finish.\n *\n * This means that when you execute task with `queue` scheduler, you are sure it will end\n * before any other task scheduled with that scheduler will start.\n *\n * ## Examples\n * Schedule recursively first, then do something\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(() => {\n * queueScheduler.schedule(() => console.log('second')); // will not happen now, but will be put on a queue\n *\n * console.log('first');\n * });\n *\n * // Logs:\n * // \"first\"\n * // \"second\"\n * ```\n *\n * Reschedule itself recursively\n * ```ts\n * import { queueScheduler } from 'rxjs';\n *\n * queueScheduler.schedule(function(state) {\n * if (state !== 0) {\n * console.log('before', state);\n * this.schedule(state - 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * console.log('after', state);\n * }\n * }, 0, 3);\n *\n * // In scheduler that runs recursively, you would expect:\n * // \"before\", 3\n * // \"before\", 2\n * // \"before\", 1\n * // \"after\", 1\n * // \"after\", 2\n * // \"after\", 3\n *\n * // But with queue it logs:\n * // \"before\", 3\n * // \"after\", 3\n * // \"before\", 2\n * // \"after\", 2\n * // \"before\", 1\n * // \"after\", 1\n * ```\n */\n\nexport const queueScheduler = new QueueScheduler(QueueAction);\n\n/**\n * @deprecated Renamed to {@link queueScheduler}. Will be removed in v8.\n */\nexport const queue = queueScheduler;\n", "import { AsyncAction } from './AsyncAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\nimport { SchedulerAction } from '../types';\nimport { animationFrameProvider } from './animationFrameProvider';\nimport { TimerHandle } from './timerHandle';\n\nexport class AnimationFrameAction extends AsyncAction {\n constructor(protected scheduler: AnimationFrameScheduler, protected work: (this: SchedulerAction, state?: T) => void) {\n super(scheduler, work);\n }\n\n protected requestAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle {\n // If delay is greater than 0, request as an async action.\n if (delay !== null && delay > 0) {\n return super.requestAsyncId(scheduler, id, delay);\n }\n // Push the action to the end of the scheduler queue.\n scheduler.actions.push(this);\n // If an animation frame has already been requested, don't request another\n // one. If an animation frame hasn't been requested yet, request one. Return\n // the current animation frame request id.\n return scheduler._scheduled || (scheduler._scheduled = animationFrameProvider.requestAnimationFrame(() => scheduler.flush(undefined)));\n }\n\n protected recycleAsyncId(scheduler: AnimationFrameScheduler, id?: TimerHandle, delay: number = 0): TimerHandle | undefined {\n // If delay exists and is greater than 0, or if the delay is null (the\n // action wasn't rescheduled) but was originally scheduled as an async\n // action, then recycle as an async action.\n if (delay != null ? 
delay > 0 : this.delay > 0) {\n return super.recycleAsyncId(scheduler, id, delay);\n }\n // If the scheduler queue has no remaining actions with the same async id,\n // cancel the requested animation frame and set the scheduled flag to\n // undefined so the next AnimationFrameAction will request its own.\n const { actions } = scheduler;\n if (id != null && actions[actions.length - 1]?.id !== id) {\n animationFrameProvider.cancelAnimationFrame(id as number);\n scheduler._scheduled = undefined;\n }\n // Return undefined so the action knows to request a new async id if it's rescheduled.\n return undefined;\n }\n}\n", "import { AsyncAction } from './AsyncAction';\nimport { AsyncScheduler } from './AsyncScheduler';\n\nexport class AnimationFrameScheduler extends AsyncScheduler {\n public flush(action?: AsyncAction): void {\n this._active = true;\n // The async id that effects a call to flush is stored in _scheduled.\n // Before executing an action, it's necessary to check the action's async\n // id to determine whether it's supposed to be executed in the current\n // flush.\n // Previous implementations of this method used a count to determine this,\n // but that was unsound, as actions that are unsubscribed - i.e. cancelled -\n // are removed from the actions array and that can shift actions that are\n // scheduled to be executed in a subsequent flush into positions at which\n // they are executed within the current flush.\n const flushId = this._scheduled;\n this._scheduled = undefined;\n\n const { actions } = this;\n let error: any;\n action = action || actions.shift()!;\n\n do {\n if ((error = action.execute(action.state, action.delay))) {\n break;\n }\n } while ((action = actions[0]) && action.id === flushId && actions.shift());\n\n this._active = false;\n\n if (error) {\n while ((action = actions[0]) && action.id === flushId && actions.shift()) {\n action.unsubscribe();\n }\n throw error;\n }\n }\n}\n", "import { AnimationFrameAction } from './AnimationFrameAction';\nimport { AnimationFrameScheduler } from './AnimationFrameScheduler';\n\n/**\n *\n * Animation Frame Scheduler\n *\n * Perform task when `window.requestAnimationFrame` would fire\n *\n * When `animationFrame` scheduler is used with delay, it will fall back to {@link asyncScheduler} scheduler\n * behaviour.\n *\n * Without delay, `animationFrame` scheduler can be used to create smooth browser animations.\n * It makes sure scheduled task will happen just before next browser content repaint,\n * thus performing animations as efficiently as possible.\n *\n * ## Example\n * Schedule div height animation\n * ```ts\n * // html:
\n * import { animationFrameScheduler } from 'rxjs';\n *\n * const div = document.querySelector('div');\n *\n * animationFrameScheduler.schedule(function(height) {\n * div.style.height = height + \"px\";\n *\n * this.schedule(height + 1); // `this` references currently executing Action,\n * // which we reschedule with new state\n * }, 0, 0);\n *\n * // You will see a div element growing in height\n * ```\n */\n\nexport const animationFrameScheduler = new AnimationFrameScheduler(AnimationFrameAction);\n\n/**\n * @deprecated Renamed to {@link animationFrameScheduler}. Will be removed in v8.\n */\nexport const animationFrame = animationFrameScheduler;\n", "import { Observable } from '../Observable';\nimport { SchedulerLike } from '../types';\n\n/**\n * A simple Observable that emits no items to the Observer and immediately\n * emits a complete notification.\n *\n * Just emits 'complete', and nothing else.\n *\n * ![](empty.png)\n *\n * A simple Observable that only emits the complete notification. It can be used\n * for composing with other Observables, such as in a {@link mergeMap}.\n *\n * ## Examples\n *\n * Log complete notification\n *\n * ```ts\n * import { EMPTY } from 'rxjs';\n *\n * EMPTY.subscribe({\n * next: () => console.log('Next'),\n * complete: () => console.log('Complete!')\n * });\n *\n * // Outputs\n * // Complete!\n * ```\n *\n * Emit the number 7, then complete\n *\n * ```ts\n * import { EMPTY, startWith } from 'rxjs';\n *\n * const result = EMPTY.pipe(startWith(7));\n * result.subscribe(x => console.log(x));\n *\n * // Outputs\n * // 7\n * ```\n *\n * Map and flatten only odd numbers to the sequence `'a'`, `'b'`, `'c'`\n *\n * ```ts\n * import { interval, mergeMap, of, EMPTY } from 'rxjs';\n *\n * const interval$ = interval(1000);\n * const result = interval$.pipe(\n * mergeMap(x => x % 2 === 1 ? of('a', 'b', 'c') : EMPTY),\n * );\n * result.subscribe(x => console.log(x));\n *\n * // Results in the following to the console:\n * // x is equal to the count on the interval, e.g. (0, 1, 2, 3, ...)\n * // x will occur every 1000ms\n * // if x % 2 is equal to 1, print a, b, c (each on its own)\n * // if x % 2 is not equal to 1, nothing will be output\n * ```\n *\n * @see {@link Observable}\n * @see {@link NEVER}\n * @see {@link of}\n * @see {@link throwError}\n */\nexport const EMPTY = new Observable((subscriber) => subscriber.complete());\n\n/**\n * @param scheduler A {@link SchedulerLike} to use for scheduling\n * the emission of the complete notification.\n * @deprecated Replaced with the {@link EMPTY} constant or {@link scheduled} (e.g. `scheduled([], scheduler)`). Will be removed in v8.\n */\nexport function empty(scheduler?: SchedulerLike) {\n return scheduler ? emptyScheduled(scheduler) : EMPTY;\n}\n\nfunction emptyScheduled(scheduler: SchedulerLike) {\n return new Observable((subscriber) => scheduler.schedule(() => subscriber.complete()));\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport function isScheduler(value: any): value is SchedulerLike {\n return value && isFunction(value.schedule);\n}\n", "import { SchedulerLike } from '../types';\nimport { isFunction } from './isFunction';\nimport { isScheduler } from './isScheduler';\n\nfunction last(arr: T[]): T | undefined {\n return arr[arr.length - 1];\n}\n\nexport function popResultSelector(args: any[]): ((...args: unknown[]) => unknown) | undefined {\n return isFunction(last(args)) ? 
args.pop() : undefined;\n}\n\nexport function popScheduler(args: any[]): SchedulerLike | undefined {\n return isScheduler(last(args)) ? args.pop() : undefined;\n}\n\nexport function popNumber(args: any[], defaultValue: number): number {\n return typeof last(args) === 'number' ? args.pop()! : defaultValue;\n}\n", "export const isArrayLike = ((x: any): x is ArrayLike => x && typeof x.length === 'number' && typeof x !== 'function');", "import { isFunction } from \"./isFunction\";\n\n/**\n * Tests to see if the object is \"thennable\".\n * @param value the object to test\n */\nexport function isPromise(value: any): value is PromiseLike {\n return isFunction(value?.then);\n}\n", "import { InteropObservable } from '../types';\nimport { observable as Symbol_observable } from '../symbol/observable';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being Observable (but not necessary an Rx Observable) */\nexport function isInteropObservable(input: any): input is InteropObservable {\n return isFunction(input[Symbol_observable]);\n}\n", "import { isFunction } from './isFunction';\n\nexport function isAsyncIterable(obj: any): obj is AsyncIterable {\n return Symbol.asyncIterator && isFunction(obj?.[Symbol.asyncIterator]);\n}\n", "/**\n * Creates the TypeError to throw if an invalid object is passed to `from` or `scheduled`.\n * @param input The object that was passed.\n */\nexport function createInvalidObservableTypeError(input: any) {\n // TODO: We should create error codes that can be looked up, so this can be less verbose.\n return new TypeError(\n `You provided ${\n input !== null && typeof input === 'object' ? 'an invalid object' : `'${input}'`\n } where a stream was expected. You can provide an Observable, Promise, ReadableStream, Array, AsyncIterable, or Iterable.`\n );\n}\n", "export function getSymbolIterator(): symbol {\n if (typeof Symbol !== 'function' || !Symbol.iterator) {\n return '@@iterator' as any;\n }\n\n return Symbol.iterator;\n}\n\nexport const iterator = getSymbolIterator();\n", "import { iterator as Symbol_iterator } from '../symbol/iterator';\nimport { isFunction } from './isFunction';\n\n/** Identifies an input as being an Iterable */\nexport function isIterable(input: any): input is Iterable {\n return isFunction(input?.[Symbol_iterator]);\n}\n", "import { ReadableStreamLike } from '../types';\nimport { isFunction } from './isFunction';\n\nexport async function* readableStreamLikeToAsyncGenerator(readableStream: ReadableStreamLike): AsyncGenerator {\n const reader = readableStream.getReader();\n try {\n while (true) {\n const { value, done } = await reader.read();\n if (done) {\n return;\n }\n yield value!;\n }\n } finally {\n reader.releaseLock();\n }\n}\n\nexport function isReadableStreamLike(obj: any): obj is ReadableStreamLike {\n // We don't want to use instanceof checks because they would return\n // false for instances from another Realm, like an + + + + + + + + + + + + + + + + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Adding a new Resource Allocation to the project

+

If one resource allocation is not sufficient for a project, PIs or project managers +may request additional allocations by clicking on the "Request Resource Allocation" +button in the Allocations section of the project details. This opens the request +form, with all existing project users listed at the bottom. PIs can select the +desired user(s) to make the requested resource allocations available on their NERC +OpenStack or OpenShift projects.

+

Here, you can view the Resource Type, information about your Allocated Project, +status, End Date of the allocation, and the Actions button or any pending actions, as +shown below:

+

Adding a new Resource Allocation

+

Adding a new Resource Allocation to your OpenStack project

+

Adding a new Resource Allocation to your OpenStack project

+
+

Important: Requested/Approved Allocated OpenStack Storage Quota & Cost

+

Ensure you choose NERC (OpenStack) in the Resource option and specify your +anticipated computing units. Each allocation, whether requested or approved, +will be billed based on the pay-as-you-go model. The exception is for +Storage quotas, where the cost is determined by your requested and approved +allocation values +to reserve storage from the total NESE storage pool. For NERC (OpenStack) +Resource Allocations, the Storage quotas are specified by the "OpenStack +Volume Quota (GiB)" and "OpenStack Swift Quota (GiB)" allocation attributes. +If you have common questions or need more information, refer to our +Billing FAQs for comprehensive +answers. Keep in mind that you can easily scale and expand your current resource +allocations within your project by following this documentation +later on.

+
+

Adding a new Resource Allocation to your OpenShift project

+

Adding a new Resource Allocation to your OpenShift project

+
+

Important: Requested/Approved Allocated OpenShift Storage Quota & Cost

+

Ensure you choose NERC-OCP (OpenShift) in the Resource option (Always Remember: +the first option, i.e. NERC (OpenStack) is selected by default!) and specify +your anticipated computing units. Each allocation, whether requested or approved, +will be billed based on the pay-as-you-go model. The exception is for +Storage quotas, where the cost is determined by +your requested and approved allocation values +to reserve storage from the total NESE storage pool. For NERC-OCP (OpenShift) +Resource Allocations, storage quotas are specified by the "OpenShift Request +on Storage Quota (GiB)" and "OpenShift Limit on Ephemeral Storage Quota (GiB)" +allocation attributes. If you have common questions or need more information, +refer to our Billing FAQs +for comprehensive answers. Keep in mind that you can easily scale and expand +your current resource allocations within your project by following +this documentation +later on.

+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/allocation/adding-a-project/index.html b/get-started/allocation/adding-a-project/index.html new file mode 100644 index 00000000..9dcbe821 --- /dev/null +++ b/get-started/allocation/adding-a-project/index.html @@ -0,0 +1,4523 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

A New Project Creation Process

+

What PIs need to fill in order to request a Project?

+

Once logged in to NERC's ColdFront, PIs can choose the Projects sub-menu located under +the Project menu.

+

Projects sub-menu

+

Project

+

Clicking on the "Add a project" button will show the interface below:

+

Add A Project

+
+

Very Important: Project Title Length Limitation

+

Please ensure that the project title is both concise and does not exceed a +length of 63 characters.

+
+

PIs need to specify an appropriate title (less than 63 characters), a description +of the research work that will be performed on NERC (in one or two paragraphs), +and the field(s) of science or research domain(s), and then click the "Save" button. +Once saved successfully, PIs effectively become the "manager" of the project and +are free to add or remove users and request resource allocation(s) for any projects +for which they are the PI. PIs are permitted to add users to their group, request +new allocations, renew expiring allocations, and provide information such as +publications and grant data. PIs can maintain all their research information under +one project or, if they require, they can separate the work into multiple projects.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/allocation/allocation-change-request/index.html b/get-started/allocation/allocation-change-request/index.html new file mode 100644 index 00000000..168ac288 --- /dev/null +++ b/get-started/allocation/allocation-change-request/index.html @@ -0,0 +1,4767 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Request change to Resource Allocation to an existing project

+

If a past resource allocation is not sufficient for an existing project, PIs or project +managers can request a change by clicking the "Request Change" button on the project +resource allocation detail page, as shown below:

+

Request Change Resource Allocation

+

Request Change Resource Allocation Attributes for OpenStack Project

+

This will bring up the detailed Quota attributes for that project as shown below:

+

Request Change Resource Allocation Attributes for OpenStack Project

+
+

Important: Requested/Approved Allocated OpenStack Storage Quota & Cost

+

For NERC (OpenStack) resource types, the Storage quotas are controlled +by the values of the "OpenStack Volume Quota (GiB)" and "OpenStack Swift Quota +(GiB)" quota attributes. The Storage cost is determined by your requested +and approved allocation values +for these quota attributes. If you have common questions or need more information, +refer to our Billing FAQs +for comprehensive answers.

+
+

PIs or project managers can provide a new value for the individual quota attributes +and give a justification for the requested changes so that the NERC admin can review +the change request and approve or deny it based on the justification and the requested +quota change. Submitting the change request will notify the NERC admin. Please +wait until the NERC admin approves or denies the change request to see the change in +your resource allocation for the selected project.

+
+

Important Information

+

PIs or project managers should enter new values ONLY for the quota attributes +they want to change; the others can be left blank, and those +quotas will not get changed!

+

To use GPU resources on your VM, you need to specify the number of GPUs in the +"OpenStack GPU Quota" attribute. Additionally, ensure that your other quota +attributes, namely "OpenStack Compute vCPU Quota" and "OpenStack Compute RAM +Quota (MiB)" have sufficient resources to meet the vCPU and RAM requirements +for one of the GPU tier-based flavors. Refer to the GPU Tier documentation +for specific requirements and further details on the flavors available for GPU +usage.

+
+

Allocation Change Requests for OpenStack Project

+

Once the request is processed by the NERC admin, any user can view the change +request trail for the project by looking at the "Allocation Change Requests" +section, as shown below:

+

Allocation Change Requests for OpenStack Project

+

Any user can click on the Action button to view details about the change request. +This will show more details about the change request, as shown below:

+

Allocation Change Request Details for OpenStack Project

+

How to Use GPU Resources in your OpenStack Project

+
+

Comparison Between CPU and GPU

+

To learn more about the key differences between CPUs and GPUs, please read this.

+
+

A GPU instance is launched in the same way +as any other compute instance, with a few considerations to keep in mind:

+
    +
  1. +

    When launching a GPU-based instance, be sure to select one of the + GPU Tier + based flavors.

    +
  2. +
  3. +

    You need to have sufficient resource quota to launch the desired flavor. Always + ensure you know which GPU-based flavor you want to use, then submit an + allocation change request + to adjust your current allocation to fit the flavor's resource requirements.

    +
    +

    Resource Required for Launching a VM with "NVIDIA A100 SXM4 40GB" Flavor.

    +

    Based on the GPU Tier documentation, +NERC provides two variations of NVIDIA A100 SXM4 40GB flavors:

    +
      +
    1. gpu-su-a100sxm4.1: Includes 1 NVIDIA A100 GPU
    2. +
    3. gpu-su-a100sxm4.2: Includes 2 NVIDIA A100 GPUs
    4. +
    +

    Select the flavor that best fits your resource needs and ensure your +OpenStack quotas are appropriately configured for the chosen flavor, meeting +the required specifications below:

    +
      +
    • +

      For the gpu-su-a100sxm4.1 flavor:

      +
        +
      • vCPU: 32
      • +
      • RAM (GiB): 240
      • +
      +
    • +
    • +

      For the gpu-su-a100sxm4.2 flavor:

      +
        +
      • vCPU: 64
      • +
      • RAM (GiB): 480
      • +
      +
    • +
    +

    Ensure that your OpenStack resource quotas are configured as follows:

    +
      +
    • OpenStack GPU Quota: Meets or exceeds the number of GPUs required by the + chosen flavor.
    • +
    • OpenStack Compute vCPU Quota: Meets or exceeds the vCPU requirement.
    • +
    • OpenStack Compute RAM Quota (MiB): Meets or exceeds the RAM requirement.
    • +
    +

    Properly configure these quotas to successfully launch a VM with the selected +"gpu-su-a100sxm4" flavor; a short quota-check sketch is shown just after this list.

    +
    +
  4. +
  5. +

    We recommend using ubuntu-22.04-x86_64 + as the image for your GPU-based instance because we have tested the NVIDIA driver + with this image and obtained good results. That said, it is possible to run a + variety of other images as well.

    +
  6. +
+
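To make the quota arithmetic above concrete, below is a minimal, self-contained Python sketch (not an official NERC tool) that checks whether a set of approved OpenStack quota values, as read from your allocation detail page, can fit one of the "gpu-su-a100sxm4" flavors listed above. The flavor requirements come from the list above; the quota numbers passed in the example calls are placeholders.

# Flavor requirements as documented above: GPUs, vCPUs, and RAM in GiB.
FLAVORS = {
    "gpu-su-a100sxm4.1": {"gpu": 1, "vcpu": 32, "ram_gib": 240},
    "gpu-su-a100sxm4.2": {"gpu": 2, "vcpu": 64, "ram_gib": 480},
}

def quota_is_sufficient(flavor_name, gpu_quota, vcpu_quota, ram_quota_mib):
    """Return True if the given quota values can accommodate one VM of the flavor."""
    flavor = FLAVORS[flavor_name]
    return (
        gpu_quota >= flavor["gpu"]
        and vcpu_quota >= flavor["vcpu"]
        # The "OpenStack Compute RAM Quota" attribute is expressed in MiB.
        and ram_quota_mib >= flavor["ram_gib"] * 1024
    )

# Placeholder quota values; substitute the ones shown on your allocation detail page.
print(quota_is_sufficient("gpu-su-a100sxm4.1", gpu_quota=1, vcpu_quota=32, ram_quota_mib=245760))   # True
print(quota_is_sufficient("gpu-su-a100sxm4.2", gpu_quota=1, vcpu_quota=64, ram_quota_mib=491520))   # False: two GPUs are required

This simply restates the rule above: each of the GPU, vCPU, and RAM quotas must meet or exceed the chosen flavor's requirement before the VM can be launched.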

Request Change Resource Allocation Attributes for OpenShift Project

+

Request Change Resource Allocation Attributes for OpenShift Project

+
+

Important: Requested/Approved Allocated OpenShift Storage Quota & Cost

+

For NERC-OCP (OpenShift) resource types, the Storage quotas are controlled +by the values of the "OpenShift Request on Storage Quota (GiB)" and "OpenShift +Limit on Ephemeral Storage Quota (GiB)" quota attributes. The Storage cost +is determined by your requested and approved allocation values +for these quota attributes.

+
+

PIs or project managers can provide a new value for the individual quota attributes +and give a justification for the requested changes so that the NERC admin can review +the change request and approve or deny it based on the justification and the requested +quota change. Submitting the change request will notify the NERC admin. Please +wait until the NERC admin approves or denies the change request to see the change in +your resource allocation for the selected project.

+
+

Important Information

+

PIs or project managers should enter new values ONLY for the quota attributes +they want to change; the others can be left blank, and those +quotas will not get changed!

+

In order to use GPU resources on your pod, you must specify the number of GPUs +you want to use in the "OpenShift Request on GPU Quota" attribute.

+
+

Allocation Change Requests for OpenShift Project

+

Once the request is processed by the NERC admin, any user can view the change +request trail for the project by looking at the "Allocation Change Requests" +section, as shown below:

+

Allocation Change Requests for OpenShift Project

+

Any user can click on the Action button to view details about the change request. +This will show more details about the change request, as shown below:

+

Allocation Change Request Details for OpenShift Project

+

How to Use GPU Resources in your OpenShift Project

+
+

Comparison Between CPU and GPU

+

To learn more about the key differences between CPUs and GPUs, please read this.

+
+

For OpenShift pods, we can specify different types of GPUs. Since OpenShift is not +based on flavors, we can customize the resources as needed at the pod level while +still utilizing GPU resources.

+

You can read about how to specify a pod to use a GPU here.

+

Also, you will be able to select a different GPU device for your workload, as +explained here.

+
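As a rough illustration of what "specifying a pod to use a GPU" looks like in practice, here is a minimal Python sketch that builds a pod manifest requesting one NVIDIA GPU and prints it as JSON. The nvidia.com/gpu resource name is the standard NVIDIA device-plugin resource; the pod name, container name, and image are placeholders, and the linked NERC documentation above remains the authoritative reference.

import json

# Hypothetical pod manifest; names and image are placeholders, not NERC defaults.
pod_manifest = {
    "apiVersion": "v1",
    "kind": "Pod",
    "metadata": {"name": "gpu-example"},
    "spec": {
        "restartPolicy": "Never",
        "containers": [
            {
                "name": "gpu-workload",
                "image": "image-registry.example.com/my-gpu-image:latest",
                "resources": {
                    # Request one GPU; the total requested across your pods must fit
                    # within the "OpenShift Request on GPU Quota" attribute.
                    "limits": {"nvidia.com/gpu": 1},
                },
            }
        ],
    },
}

print(json.dumps(pod_manifest, indent=2))

You would apply such a manifest with the usual OpenShift tooling (for example, oc apply -f), keeping the total number of requested GPUs within your allocation's GPU quota.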
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/allocation/allocation-details/index.html b/get-started/allocation/allocation-details/index.html new file mode 100644 index 00000000..a83c7be3 --- /dev/null +++ b/get-started/allocation/allocation-details/index.html @@ -0,0 +1,4696 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Allocation details

+

Access to ColdFront's allocation details is based on user roles. +PIs and managers see the same allocation details as users, and can also add +project users to the allocation, if they're not already on it, and remove users +from an allocation.

+

How to View Resource Allocation Details in the Project

+

A single project can have multiple allocations. To view details about a specific +resource allocation, click on any of the available allocations in the Allocations +section of the project details. Here, you can view the Resource Type, information +about your Allocated Project, status, End Date of the allocation, and the Actions +button or any pending actions, as shown below:

+

Viewing Resource Allocation Details

+

Clicking the Action icon (shown as a folder icon on the right side of each allocation, +as seen in the image above) for the corresponding allocation will open a page +displaying detailed information about that allocation. You can access either the +PI and Manager View or General User View +of the allocation detail page for OpenStack or OpenShift Resource Allocation, +depending on your role in the project.

+

How to find ID of the Resource Allocation

+

After clicking the Action button for the corresponding allocation, you will be +redirected to a new allocation detail page. The web browser will display the URL +in the following format:

+
https://coldfront.mss.mghpcc.org/allocation/<Allocation_ID>/
+
+

To find the ID of the resource allocation, observe the URL and note the +<Allocation_ID> part. For example, in the URL https://coldfront.mss.mghpcc.org/allocation/1/, +the resource Allocation ID is 1.

+
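If you ever need to pick this Allocation ID out of a URL programmatically, for example while scripting against several allocations, the following small Python sketch shows one way to do it. The URL used here is purely illustrative and follows the format described above.

import re

# Illustrative URL in the format shown above; replace it with your own.
url = "https://coldfront.mss.mghpcc.org/allocation/1/"

match = re.search(r"/allocation/(\d+)/", url)
if match:
    allocation_id = int(match.group(1))
    print(f"Resource Allocation ID: {allocation_id}")
else:
    print("No allocation ID found in the URL.")

For the example URL this prints "Resource Allocation ID: 1", matching the explanation above.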

PI and Manager View

+

PIs and managers can view important details of the project and its underlying +allocations. This view shows all allocations, including start and end dates, creation +and last modified dates, users on the allocation, and public allocation attributes. +PIs and managers can add or remove users from allocations.

+

PI and Manager Allocation View of OpenStack Resource Allocation

+

PI and Manager Allocation View of OpenStack Resource Allocation

+

PI and Manager Allocation View of OpenShift Resource Allocation

+

PI and Manager Allocation View of OpenShift Resource Allocation

+

General User View

+

General Users who are not PIs or Managers on a project see a read-only view of the +allocation details. If a user is on a project but not a particular allocation, they +will not be able to see the allocation in the Project view nor will they be able +to access the Allocation detail page.

+

General User View of OpenStack Resource Allocation

+

General User View of OpenStack Resource Allocation

+

General User View of OpenShift Resource Allocation

+

General User View of OpenShift Resource Allocation

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/allocation/archiving-a-project/index.html b/get-started/allocation/archiving-a-project/index.html new file mode 100644 index 00000000..ead730c3 --- /dev/null +++ b/get-started/allocation/archiving-a-project/index.html @@ -0,0 +1,4461 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Archiving an Existing Project

+

Only a PI can archive their ColdFront project(s) +by accessing NERC's ColdFront interface.

+
+

Important Note

+

If you archive a project, this will expire all your allocations on that +project, which will clean up and also disable your group's access to the resources +in those allocations. Also, you cannot make any changes to archived projects.

+

Alert Archiving a Project

+
+

Once archived, a project is no longer visible in your projects list. +All archived projects will be listed under your archived projects, +which can be viewed by clicking the "View archived projects" button as shown below:

+

View Archived Projects

+

All your archived projects are displayed here:

+

Archived Projects

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/allocation/coldfront/index.html b/get-started/allocation/coldfront/index.html new file mode 100644 index 00000000..29824804 --- /dev/null +++ b/get-started/allocation/coldfront/index.html @@ -0,0 +1,4591 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

What is NERC's ColdFront?

+

NERC uses NERC's ColdFront interface, an +open source resource allocation management system called +ColdFront, to provide a single point-of-entry +for administration, reporting, and measuring the scientific impact of NERC resources +for PIs.

+
+

Learning ColdFront

+

A collection of animated gifs +showcasing common functions in ColdFront is available, providing helpful +insights into how these features can be utilized.

+
+

How to get access to NERC's ColdFront

+

Any user who has registered their user account through the +MGHPCC Shared Services (MGHPCC-SS) Account Portal, +also known as "RegApp", can get access to NERC's ColdFront interface.

+

General Users who are not PIs or Managers on a project see a read-only view of +NERC's ColdFront, as described here.

+

Once a PI Account request +is granted, the PI will receive an email confirming the approval and +explaining how to connect to NERC's ColdFront.

+

PIs or project managers can use NERC's ColdFront as a self-service web portal, +where they see an administrative view of it as +described here and can +perform the following tasks:

+
    +
  • +

    Only a PI can add a new project and archive any existing project(s)

    +
  • +
  • +

    Manage existing projects

    +
  • +
  • +

    Request allocations that fall under projects in NERC's resources such as clusters, + cloud resources, servers, storage, and software licenses

    +
  • +
  • +

    Add/remove access to/from allocated resources for users who are members of the project + without requiring system administrator interaction

    +
  • +
  • +

    Elevate selected users to 'manager' status, allowing them to handle some of the + PI's tasks, such as requesting new resource allocations, adding/removing users to/from + resource allocations, and adding project data such as grants and publications

    +
  • +
  • +

    Monitor resource utilization such as storage and cloud usage

    +
  • +
  • +

    Receive email notifications for expiring/renewing access to resources as well + as notifications when allocations change status - i.e. Active, Active (Needs + Renewal), Denied, Expired

    +
  • +
  • +

    Provide information such as grants, publications, and other reportable data for + periodic review by the center director to demonstrate the need for the resources

    +
  • +
+

How to login to NERC's ColdFront?

+

NERC's ColdFront interface provides users with a +login page as shown here:

+

ColdFront Login Page

+

Please click on the "Log In" button. It will then show the login interface, as +shown below:

+

ColdFront Login Interface

+

You need to click on the "Log in via OpenID Connect" button. This will redirect you +to the CILogon welcome page, where you can select your appropriate Identity Provider +as shown below:

+

CILogon Welcome Page

+

Once successful, you will be redirected to the ColdFront's main dashboard as shown +below:

+

ColdFront Dashboard

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/allocation/images/CILogon.png b/get-started/allocation/images/CILogon.png new file mode 100644 index 00000000..8931dd9d Binary files /dev/null and b/get-started/allocation/images/CILogon.png differ diff --git a/get-started/allocation/images/adding_new_resource_allocations.png b/get-started/allocation/images/adding_new_resource_allocations.png new file mode 100644 index 00000000..34e7cf86 Binary files /dev/null and b/get-started/allocation/images/adding_new_resource_allocations.png differ diff --git a/get-started/allocation/images/archived_projects_list.png b/get-started/allocation/images/archived_projects_list.png new file mode 100644 index 00000000..c6f12b21 Binary files /dev/null and b/get-started/allocation/images/archived_projects_list.png differ diff --git a/get-started/allocation/images/archiving_project_alert.png b/get-started/allocation/images/archiving_project_alert.png new file mode 100644 index 00000000..b3b8e852 Binary files /dev/null and b/get-started/allocation/images/archiving_project_alert.png differ diff --git a/get-started/allocation/images/coldfront-activate-expiring-allocation.png b/get-started/allocation/images/coldfront-activate-expiring-allocation.png new file mode 100644 index 00000000..4dcd1adb Binary files /dev/null and b/get-started/allocation/images/coldfront-activate-expiring-allocation.png differ diff --git a/get-started/allocation/images/coldfront-add-a-project.png b/get-started/allocation/images/coldfront-add-a-project.png new file mode 100644 index 00000000..c3c84c2d Binary files /dev/null and b/get-started/allocation/images/coldfront-add-a-project.png differ diff --git a/get-started/allocation/images/coldfront-add-remove-users.png b/get-started/allocation/images/coldfront-add-remove-users.png new file mode 100644 index 00000000..116a21ae Binary files /dev/null and b/get-started/allocation/images/coldfront-add-remove-users.png differ diff --git a/get-started/allocation/images/coldfront-add-user-to-project.png b/get-started/allocation/images/coldfront-add-user-to-project.png new file mode 100644 index 00000000..c2b6b10a Binary files /dev/null and b/get-started/allocation/images/coldfront-add-user-to-project.png differ diff --git a/get-started/allocation/images/coldfront-add-users-to-allocation.png b/get-started/allocation/images/coldfront-add-users-to-allocation.png new file mode 100644 index 00000000..150eee29 Binary files /dev/null and b/get-started/allocation/images/coldfront-add-users-to-allocation.png differ diff --git a/get-started/allocation/images/coldfront-allocation-renewal-requested.png b/get-started/allocation/images/coldfront-allocation-renewal-requested.png new file mode 100644 index 00000000..e9d757ed Binary files /dev/null and b/get-started/allocation/images/coldfront-allocation-renewal-requested.png differ diff --git a/get-started/allocation/images/coldfront-change-user-role.png b/get-started/allocation/images/coldfront-change-user-role.png new file mode 100644 index 00000000..1e664e7d Binary files /dev/null and b/get-started/allocation/images/coldfront-change-user-role.png differ diff --git a/get-started/allocation/images/coldfront-dashboard.png b/get-started/allocation/images/coldfront-dashboard.png new file mode 100644 index 00000000..940b4cc0 Binary files /dev/null and b/get-started/allocation/images/coldfront-dashboard.png differ diff --git a/get-started/allocation/images/coldfront-login-interface.png 
b/get-started/allocation/images/coldfront-login-interface.png new file mode 100644 index 00000000..df0a4b80 Binary files /dev/null and b/get-started/allocation/images/coldfront-login-interface.png differ diff --git a/get-started/allocation/images/coldfront-login-page.png b/get-started/allocation/images/coldfront-login-page.png new file mode 100644 index 00000000..2831ad43 Binary files /dev/null and b/get-started/allocation/images/coldfront-login-page.png differ diff --git a/get-started/allocation/images/coldfront-openshift-allocation-attributes.png b/get-started/allocation/images/coldfront-openshift-allocation-attributes.png new file mode 100644 index 00000000..f73a7fcf Binary files /dev/null and b/get-started/allocation/images/coldfront-openshift-allocation-attributes.png differ diff --git a/get-started/allocation/images/coldfront-openshift-allocation-change-requests.png b/get-started/allocation/images/coldfront-openshift-allocation-change-requests.png new file mode 100644 index 00000000..2a7b47a8 Binary files /dev/null and b/get-started/allocation/images/coldfront-openshift-allocation-change-requests.png differ diff --git a/get-started/allocation/images/coldfront-openshift-allocation-general-user-view.png b/get-started/allocation/images/coldfront-openshift-allocation-general-user-view.png new file mode 100644 index 00000000..74c9f678 Binary files /dev/null and b/get-started/allocation/images/coldfront-openshift-allocation-general-user-view.png differ diff --git a/get-started/allocation/images/coldfront-openshift-allocation-pi-manager-view.png b/get-started/allocation/images/coldfront-openshift-allocation-pi-manager-view.png new file mode 100644 index 00000000..2b3e5f50 Binary files /dev/null and b/get-started/allocation/images/coldfront-openshift-allocation-pi-manager-view.png differ diff --git a/get-started/allocation/images/coldfront-openshift-change-requested-details.png b/get-started/allocation/images/coldfront-openshift-change-requested-details.png new file mode 100644 index 00000000..b9ce9f67 Binary files /dev/null and b/get-started/allocation/images/coldfront-openshift-change-requested-details.png differ diff --git a/get-started/allocation/images/coldfront-openstack-allocation-attributes.png b/get-started/allocation/images/coldfront-openstack-allocation-attributes.png new file mode 100644 index 00000000..8e237fd1 Binary files /dev/null and b/get-started/allocation/images/coldfront-openstack-allocation-attributes.png differ diff --git a/get-started/allocation/images/coldfront-openstack-allocation-change-requests.png b/get-started/allocation/images/coldfront-openstack-allocation-change-requests.png new file mode 100644 index 00000000..0e424244 Binary files /dev/null and b/get-started/allocation/images/coldfront-openstack-allocation-change-requests.png differ diff --git a/get-started/allocation/images/coldfront-openstack-allocation-general-user-view.png b/get-started/allocation/images/coldfront-openstack-allocation-general-user-view.png new file mode 100644 index 00000000..1e252430 Binary files /dev/null and b/get-started/allocation/images/coldfront-openstack-allocation-general-user-view.png differ diff --git a/get-started/allocation/images/coldfront-openstack-allocation-pi-manager-view.png b/get-started/allocation/images/coldfront-openstack-allocation-pi-manager-view.png new file mode 100644 index 00000000..de1bdf59 Binary files /dev/null and b/get-started/allocation/images/coldfront-openstack-allocation-pi-manager-view.png differ diff --git 
a/get-started/allocation/images/coldfront-openstack-change-requested-details.png b/get-started/allocation/images/coldfront-openstack-change-requested-details.png new file mode 100644 index 00000000..a5458748 Binary files /dev/null and b/get-started/allocation/images/coldfront-openstack-change-requested-details.png differ diff --git a/get-started/allocation/images/coldfront-pi-add-users-on-allocation.png b/get-started/allocation/images/coldfront-pi-add-users-on-allocation.png new file mode 100644 index 00000000..447c3947 Binary files /dev/null and b/get-started/allocation/images/coldfront-pi-add-users-on-allocation.png differ diff --git a/get-started/allocation/images/coldfront-project-review-notifications.png b/get-started/allocation/images/coldfront-project-review-notifications.png new file mode 100644 index 00000000..6ec48c57 Binary files /dev/null and b/get-started/allocation/images/coldfront-project-review-notifications.png differ diff --git a/get-started/allocation/images/coldfront-project-review-pending-status.png b/get-started/allocation/images/coldfront-project-review-pending-status.png new file mode 100644 index 00000000..6936749c Binary files /dev/null and b/get-started/allocation/images/coldfront-project-review-pending-status.png differ diff --git a/get-started/allocation/images/coldfront-project-review-steps.png b/get-started/allocation/images/coldfront-project-review-steps.png new file mode 100644 index 00000000..340520a5 Binary files /dev/null and b/get-started/allocation/images/coldfront-project-review-steps.png differ diff --git a/get-started/allocation/images/coldfront-project-review.png b/get-started/allocation/images/coldfront-project-review.png new file mode 100644 index 00000000..5f1a5af9 Binary files /dev/null and b/get-started/allocation/images/coldfront-project-review.png differ diff --git a/get-started/allocation/images/coldfront-project.png b/get-started/allocation/images/coldfront-project.png new file mode 100644 index 00000000..8f52a50b Binary files /dev/null and b/get-started/allocation/images/coldfront-project.png differ diff --git a/get-started/allocation/images/coldfront-projects-sub-menu.png b/get-started/allocation/images/coldfront-projects-sub-menu.png new file mode 100644 index 00000000..c272cd08 Binary files /dev/null and b/get-started/allocation/images/coldfront-projects-sub-menu.png differ diff --git a/get-started/allocation/images/coldfront-remove-users-from-a-project.png b/get-started/allocation/images/coldfront-remove-users-from-a-project.png new file mode 100644 index 00000000..bdd38b0b Binary files /dev/null and b/get-started/allocation/images/coldfront-remove-users-from-a-project.png differ diff --git a/get-started/allocation/images/coldfront-remove-users-from-allocation.png b/get-started/allocation/images/coldfront-remove-users-from-allocation.png new file mode 100644 index 00000000..0c09293c Binary files /dev/null and b/get-started/allocation/images/coldfront-remove-users-from-allocation.png differ diff --git a/get-started/allocation/images/coldfront-renewed-allocation.png b/get-started/allocation/images/coldfront-renewed-allocation.png new file mode 100644 index 00000000..5143b977 Binary files /dev/null and b/get-started/allocation/images/coldfront-renewed-allocation.png differ diff --git a/get-started/allocation/images/coldfront-request-a-new-openshift-allocation.png b/get-started/allocation/images/coldfront-request-a-new-openshift-allocation.png new file mode 100644 index 00000000..9df1fdc4 Binary files /dev/null and 
b/get-started/allocation/images/coldfront-request-a-new-openshift-allocation.png differ diff --git a/get-started/allocation/images/coldfront-request-a-new-openstack-allocation.png b/get-started/allocation/images/coldfront-request-a-new-openstack-allocation.png new file mode 100644 index 00000000..cdf3ec6a Binary files /dev/null and b/get-started/allocation/images/coldfront-request-a-new-openstack-allocation.png differ diff --git a/get-started/allocation/images/coldfront-request-change-allocation.png b/get-started/allocation/images/coldfront-request-change-allocation.png new file mode 100644 index 00000000..764ecad3 Binary files /dev/null and b/get-started/allocation/images/coldfront-request-change-allocation.png differ diff --git a/get-started/allocation/images/coldfront-request-new-openshift-allocation-with-users.png b/get-started/allocation/images/coldfront-request-new-openshift-allocation-with-users.png new file mode 100644 index 00000000..62060e56 Binary files /dev/null and b/get-started/allocation/images/coldfront-request-new-openshift-allocation-with-users.png differ diff --git a/get-started/allocation/images/coldfront-request-new-openshift-allocation.png b/get-started/allocation/images/coldfront-request-new-openshift-allocation.png new file mode 100644 index 00000000..b50228b3 Binary files /dev/null and b/get-started/allocation/images/coldfront-request-new-openshift-allocation.png differ diff --git a/get-started/allocation/images/coldfront-request-new-openstack-allocation-with-users.png b/get-started/allocation/images/coldfront-request-new-openstack-allocation-with-users.png new file mode 100644 index 00000000..d715d132 Binary files /dev/null and b/get-started/allocation/images/coldfront-request-new-openstack-allocation-with-users.png differ diff --git a/get-started/allocation/images/coldfront-request-new-openstack-allocation.png b/get-started/allocation/images/coldfront-request-new-openstack-allocation.png new file mode 100644 index 00000000..051328c6 Binary files /dev/null and b/get-started/allocation/images/coldfront-request-new-openstack-allocation.png differ diff --git a/get-started/allocation/images/coldfront-search-multiple-users.png b/get-started/allocation/images/coldfront-search-multiple-users.png new file mode 100644 index 00000000..8f23e99c Binary files /dev/null and b/get-started/allocation/images/coldfront-search-multiple-users.png differ diff --git a/get-started/allocation/images/coldfront-submit-allocation-activation.png b/get-started/allocation/images/coldfront-submit-allocation-activation.png new file mode 100644 index 00000000..c6f88003 Binary files /dev/null and b/get-started/allocation/images/coldfront-submit-allocation-activation.png differ diff --git a/get-started/allocation/images/coldfront-user-details.png b/get-started/allocation/images/coldfront-user-details.png new file mode 100644 index 00000000..e643edea Binary files /dev/null and b/get-started/allocation/images/coldfront-user-details.png differ diff --git a/get-started/allocation/images/coldfront-user-search.png b/get-started/allocation/images/coldfront-user-search.png new file mode 100644 index 00000000..eca6e454 Binary files /dev/null and b/get-started/allocation/images/coldfront-user-search.png differ diff --git a/get-started/allocation/images/coldfront-users-notification.png b/get-started/allocation/images/coldfront-users-notification.png new file mode 100644 index 00000000..454371a9 Binary files /dev/null and b/get-started/allocation/images/coldfront-users-notification.png differ diff --git 
a/get-started/allocation/images/needs_renew_allocation.png b/get-started/allocation/images/needs_renew_allocation.png new file mode 100644 index 00000000..dbf0f78c Binary files /dev/null and b/get-started/allocation/images/needs_renew_allocation.png differ diff --git a/get-started/allocation/images/new_resource_allocation.png b/get-started/allocation/images/new_resource_allocation.png new file mode 100644 index 00000000..19d81582 Binary files /dev/null and b/get-started/allocation/images/new_resource_allocation.png differ diff --git a/get-started/allocation/images/renew_expiring_allocation.png b/get-started/allocation/images/renew_expiring_allocation.png new file mode 100644 index 00000000..216599d4 Binary files /dev/null and b/get-started/allocation/images/renew_expiring_allocation.png differ diff --git a/get-started/allocation/images/view_archived_projects.png b/get-started/allocation/images/view_archived_projects.png new file mode 100644 index 00000000..c1dd112d Binary files /dev/null and b/get-started/allocation/images/view_archived_projects.png differ diff --git a/get-started/allocation/images/viewing_resource_allocation_details.png b/get-started/allocation/images/viewing_resource_allocation_details.png new file mode 100644 index 00000000..a92ef2ec Binary files /dev/null and b/get-started/allocation/images/viewing_resource_allocation_details.png differ diff --git a/get-started/allocation/manage-users-to-a-project/index.html b/get-started/allocation/manage-users-to-a-project/index.html new file mode 100644 index 00000000..846a7ad1 --- /dev/null +++ b/get-started/allocation/manage-users-to-a-project/index.html @@ -0,0 +1,4651 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Managing Users in the Project

+

Add/Remove User(s) to/from a Project

+

A user can only view projects they are on. PIs or managers can add or remove users +from their respective projects by navigating to the Users section of the project.

+

Add/Remove Users from Project

+

Once you click on the "Add Users" button, it will show the following search interface:

+

User Search Interface

+
+

Searching multiple users at once!

+

If you want to simultaneously search for multiple users in the system, you +can input multiple usernames separated by space or newline, as shown below:

+

Searching Multiple User(s)

+

NOTE: This will return a list of all users matching the provided usernames, +but only if they exist in the system.

+
+

PIs or managers can search for any users in the system that are not already part of the +project by providing an exact username or partial text across multiple other fields. The +search results show details about the user account such as email address, username, +first name, last name, etc., as shown below:

+

Add User(s) To Project

+
+

Delegating user as 'Manager'

+

When adding a user to your project you can optionally designate them as a +"Manager" by selecting their role using the drop down next to their email. +Read more about user roles here.

+
+

The matched user(s) can then be selected and assigned directly to the available resource +allocation(s) on the given project using this interface. While adding the users, +their Role can also be selected from the dropdown options as either User or Manager. +Once you have confirmed the selection of user(s), their roles, and allocations, click on +the "Add Selected Users to Project" button.

+

Removing users from the project is straightforward: just click on the +"Remove Users" button. It then shows the following interface:

+

Remove User(s) From A Project

+

PI or project managers can select the user(s) and then click on the "Remove Selected +Users From Project" button.

+

User Roles

+

Access to ColdFront is role based, so users see a read-only view of the allocation +details for any allocations they are on. PIs see the same allocation details as general +users and can also add project users to the allocation if they're not already on +it. When a PI first adds a user to the project, that user is given the User role. Later, +the PI or project managers can delegate users on their project to the 'manager' role. +This allows multiple managers on the same project and provides the user with the +same access and abilities as the PI. A "Manager" is a user who has the same +permissions as the PI to add/remove users, request/renew allocations, and +add/remove project info such as grants, publications, and research output. +Managers may also complete the annual project review.

+
+

What can a PI do that a manager can't?

+

The only tasks a PI can do that a manager can't are creating a new project and +archiving any existing project(s). All other project-related actions that a PI +can perform can also be accomplished by any one of the managers assigned to +that project.

+
+

General User Accounts are not able to create/update projects and request Resource +Allocations. Instead, these accounts must be associated with a Project that has +Resources. General User accounts that are associated with a Project have access +to view their project details and use all the resources associated with the Project +on NERC.

+

General Users (not PIs or Managers) can turn off email notifications at the project level. PIs also hold the 'manager' status on a project, and Managers can't turn off their notifications; this ensures they continue to receive allocation expiration notification emails.

+

Delegating User to Manager Role

+

You can also modify the role of existing project users at any time by clicking on the Edit button next to the user's name.

+

To change a user's role to 'manager' click on the edit icon next to the user's name +on the Project Detail page:

+

Change User Role

+

Then toggle the "Role" from User to Manager:

+

User Details

+
+

Very Important

+

Make sure to click the "Update" button to save the change.

+

This delegation of "Manager" role can also be done when adding a user to your +project. You can optionally designate them as a "Manager" by selecting their +role using the drop down next to their email as described here.

+
+

Notifications

+

All users on a project will receive notifications about allocations including +reminders of upcoming expiration dates and status changes. Users may uncheck +the box next to their username to turn off notifications. Managers and PIs on +the project are not able to turn off notifications.

+

User Notifications

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/allocation/managing-users-to-an-allocation/index.html b/get-started/allocation/managing-users-to-an-allocation/index.html new file mode 100644 index 00000000..21ef9924 --- /dev/null +++ b/get-started/allocation/managing-users-to-an-allocation/index.html @@ -0,0 +1,4461 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Adding and removing project Users to project Resource Allocation

+

Any users on a given project who were not previously added to its resource allocation can be added to that allocation by clicking on the "Add Users" button as shown below:

+

Adding and removing project User(s) to project Allocation

+

Once clicked, it will show the following interface, where PIs can select the available user(s) using the checkboxes and click on the "Add Selected Users to Allocation" button.

+

Add Selected User(s) to Allocation

+
+

Very Important

+

The desired user must already be on the project to be added to the allocation.

+
+

To remove users from the Resource Allocation, simply click on the "Remove Users" button, which shows the following interface:

+

Removing User(s) from the Resource Allocation

+

The PI or project managers can select the user(s) using the checkboxes and then click on the "Remove Selected Users From Project" button.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/allocation/project-and-allocation-review/index.html b/get-started/allocation/project-and-allocation-review/index.html new file mode 100644 index 00000000..f03dd420 --- /dev/null +++ b/get-started/allocation/project-and-allocation-review/index.html @@ -0,0 +1,4668 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Project and Individual Allocation Annual Review Process

+

Project Annual Review Process

+

NERC's ColdFront enables annual project reviews for NERC admins by requiring PIs to assess and update their projects. With the Project Review feature activated, each project undergoes a mandatory review every 365 days. During this process, PIs update project details, confirm project members, and input publications, grants, and research outcomes from the preceding year.

+
+

Required Project Review

+

The PI or any manager(s) of a project must complete the project review once every 365 days. ColdFront does not send notifications to PIs when project reviews are due. Instead, when the PI or Manager(s) of a project views their project, they will find a notification that the project review is due. Additionally, while the project review is pending, PIs or Project Manager(s) cannot request new allocations, renew expiring allocations, or submit change requests to update the allocated allocation attributes' values. This enforces the requirement that PIs review their projects annually. The PI or any managers on the project are able to complete the project review process.

+
+

Project Reviews by PIs or Project Manager(s)

+

When a PI or any Project Manager(s) of a project logs into NERC's ColdFront web +console and their project review is due, they will see a banner next to the +project name on the home page:

+

Project Review

+

If they try to request a new allocation, renew an expiring allocation, or submit a change request to update the allocated allocation attributes' values, they will get an error message:

+

Project Review Pending Notification

+

Project Review Steps

+

When they click on the "Review Project" link they're presented with the requirements +and a description of why we're asking for this update:

+

Project Review Submit Details

+

The links in each step direct them to different parts of their Project Detail page. This review page lists the dates when grants and publications were last updated. If there are no grants or publications, or at least one of them hasn't been updated in the last year, we ask for a reason they're not updating the project information. This helps encourage PIs to provide updates if they have them. If not, they provide a reason and this is displayed for the NERC admins as part of the review process.

+

Once the project review page is completed, the PI is redirected to the project +detail page and they see the status change to "project review pending".

+

Project Review Pending Status

+

Allocation Renewals

+

When a requested allocation is approved, it is given an "End Date", which is normally 365 days (1 year) from the date it is approved, i.e. the "Start Date". Automated emails are triggered to all users on an allocation when the end date is 60 days, 30 days, and 7 days away, unless the user turns off notifications on the project; once the end date passes, the allocation status is set to "Active (Needs Renewal)".

+
+

Very Important: Urgent Allocation Renewal is Required Before End Date

+

If the allocation renewal isn't processed by the PI or Manager prior to the original allocation end date, the allocation status will be set to "Active (Needs Renewal)" and the allocation users will get a notification email letting them know the allocation needs renewal!

+

Allocation Renewal Prior End Date

+

Currently, a project will continue to be able to utilize allocations even after the allocation end date, which will result in ongoing costs for you. Such allocations will be marked as "Active (Needs Renewal)" as shown below:

+

Allocation Needs Renewal After End Date

+
+

Allocation renewals may not require any additions or changes to the allocation +attributes from the PI or Manager. By default, if the PI or Manager clicks on +the 'Activate' button as shown below:

+

ColdFront Activate Expiring Allocation

+

Then it will prompt for confirmation and allow them to review and submit the activation request by clicking on the 'Submit' button as shown below:

+

ColdFront Allocation Renewal Submit

+

Emails are sent to all allocation users letting them know the renewal request has +been submitted.

+

Then the allocation status will change to "Renewal Requested" as shown below:

+

ColdFront Allocation Renewal Requested

+

Once the renewal request is reviewed and approved by NERC admins, the allocation status will change to "Active" and the expiration date will be extended by another 365 days, as shown below:

+

ColdFront Allocation Renewal Successful

+

Then an automated email notification will be sent to the PI and all users on the +allocation that have enabled email notifications.

+

Cost Associated with Allocations that Need Renewal after "End Date"

+

Currently, a project will continue to be able to utilize allocations even after their "End Date", resulting in ongoing costs for you. Such allocations will be marked as "Active (Needs Renewal)". In the future, we plan to change this behavior so that, after the end date, allocations will prevent associated VMs/pods from starting and may cause active VMs/pods to cease running.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/allocation/requesting-an-allocation/index.html b/get-started/allocation/requesting-an-allocation/index.html new file mode 100644 index 00000000..1c58ea5c --- /dev/null +++ b/get-started/allocation/requesting-an-allocation/index.html @@ -0,0 +1,4750 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

How to request a new Resource Allocation

+

On the Project Detail page the project PI/manager(s) can request an allocation +by clicking the "Request Resource Allocation" button as shown below:

+

Requesting an Allocation

+

On the shown page, you will be able to choose either OpenStack Resource Allocation +or OpenShift Resource Allocation by specifying either NERC (OpenStack) or +NERC-OCP (OpenShift) in the Resource dropdown option. Note: The +first option i.e. NERC (OpenStack), is selected by default.

+
+

Default GPU Resource Quota for Initial Allocation Requests

+

By default, the GPU resource quota is set to 0 for the initial resource allocation request for both OpenStack and OpenShift Resource Types. However, you will be able to submit a change request to adjust the corresponding GPU quotas for both after they are approved for the first time. For NERC's OpenStack, please follow this guide on how to utilize GPU resources in your OpenStack project. For NERC's OpenShift, refer to this reference to learn how to use GPU resources at the pod level.

+
+

Request A New OpenStack Resource Allocation for an OpenStack Project

+

Request A New OpenStack Resource Allocation

+

If users have already been added to the project as +described here, the Users selection section +will be displayed as shown below:

+

Request A New OpenStack Resource Allocation Selecting Users

+

In this section, the project PI/manager(s) can choose user(s) from the project +to be included in this allocation before clicking the "Submit" button.

+
+

Read the End User License Agreement Before Submission

+

You should read the shown End User License Agreement (the "Agreement"). +By clicking the "Submit" button, you agree to the Terms and Conditions.

+
+
+

Important: Requested/Approved Allocated OpenStack Storage Quota & Cost

+

Ensure you choose NERC (OpenStack) in the Resource option and specify your +anticipated computing units. Each allocation, whether requested or approved, +will be billed based on the pay-as-you-go model. The exception is for +Storage quotas, where the cost is determined by your requested and approved +allocation values +to reserve storage from the total NESE storage pool. For NERC (OpenStack) +Resource Allocations, the Storage quotas are specified by the "OpenStack +Volume Quota (GiB)" and "OpenStack Swift Quota (GiB)" allocation attributes. +If you have common questions or need more information, refer to our +Billing FAQs for comprehensive +answers. Keep in mind that you can easily scale and expand your current resource +allocations within your project by following this documentation +later on.

+
+

Resource Allocation Quotas for OpenStack Project

+

The amount of quota to start a resource allocation with after approval can be specified using an integer field in the resource allocation request form, as shown above. The requested unit value determines the quotas granted to the PI or project managers. The basic unit of computational resources is defined as an integer value that corresponds to multiple OpenStack resource quotas. For example, 1 Unit corresponds to:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Resource Name | Quota Amount x Unit
Instances | 1
vCPUs | 1
GPU | 0
RAM (MiB) | 4096
Volumes | 2
Volume Storage (GiB) | 20
Object Storage (GiB) | 1
+
+

Information

+

By default, 2 OpenStack Floating IPs, 10 Volume Snapshots, and 10 Security Groups are provided to each approved project, regardless of the number of requested quota units.

+
+
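To make the unit-to-quota mapping concrete, here is a minimal Python sketch (an unofficial illustration, not a NERC tool) that multiplies the per-unit amounts from the table above by the number of requested units:

```python
# Per-unit OpenStack quota amounts, mirroring the table above.
OPENSTACK_QUOTAS_PER_UNIT = {
    "Instances": 1,
    "vCPUs": 1,
    "GPU": 0,
    "RAM (MiB)": 4096,
    "Volumes": 2,
    "Volume Storage (GiB)": 20,
    "Object Storage (GiB)": 1,
}

def openstack_quotas(requested_units: int) -> dict:
    """Return the starting OpenStack quotas for a requested number of units."""
    return {name: amount * requested_units
            for name, amount in OPENSTACK_QUOTAS_PER_UNIT.items()}

# Example: a 2-unit request starts with 2 instances, 2 vCPUs, 8192 MiB RAM,
# 4 volumes, 40 GiB volume storage, and 2 GiB object storage.
print(openstack_quotas(2))
```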

Request A New OpenShift Resource Allocation for an OpenShift project

+

Request A New OpenShift Resource Allocation

+

If users have already been added to the project as +described here, the Users selection section +will be displayed as shown below:

+

Request A New OpenShift Resource Allocation Selecting Users

+

In this section, the project PI/manager(s) can choose user(s) from the project +to be included in this allocation before clicking the "Submit" button.

+
+

Read the End User License Agreement Before Submission

+

You should read the shown End User License Agreement (the "Agreement"). +By clicking the "Submit" button, you agree to the Terms and Conditions.

+
+

Resource Allocation Quotas for OpenShift Project

+

The amount of quota to start a resource allocation with after approval can be specified using an integer field in the resource allocation request form, as shown above. The requested unit value determines the quotas granted to the PI or project managers. The basic unit of computational resources is defined as an integer value that corresponds to multiple OpenShift resource quotas. For example, 1 Unit corresponds to:

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Resource Name | Quota Amount x Unit
vCPUs | 1
GPU | 0
RAM (MiB) | 4096
Persistent Volume Claims (PVC) | 2
Storage (GiB) | 20
Ephemeral Storage (GiB) | 5
+
+

Important: Requested/Approved Allocated OpenShift Storage Quota & Cost

+

Ensure you choose NERC-OCP (OpenShift) in the Resource option (Always Remember: +the first option, i.e. NERC (OpenStack) is selected by default!) and specify +your anticipated computing units. Each allocation, whether requested or approved, +will be billed based on the pay-as-you-go model. The exception is for +Storage quotas, where the cost is determined by +your requested and approved allocation values +to reserve storage from the total NESE storage pool. For NERC-OCP (OpenShift) +Resource Allocations, storage quotas are specified by the "OpenShift Request +on Storage Quota (GiB)" and "OpenShift Limit on Ephemeral Storage Quota (GiB)" +allocation attributes. If you have common questions or need more information, +refer to our Billing FAQs +for comprehensive answers. Keep in mind that you can easily scale and expand +your current resource allocations within your project by following +this documentation +later on.

+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/best-practices/best-practices-for-bu/index.html b/get-started/best-practices/best-practices-for-bu/index.html new file mode 100644 index 00000000..e7b5fa23 --- /dev/null +++ b/get-started/best-practices/best-practices-for-bu/index.html @@ -0,0 +1,4506 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + + + + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/best-practices/best-practices-for-harvard/index.html b/get-started/best-practices/best-practices-for-harvard/index.html new file mode 100644 index 00000000..11f521fa --- /dev/null +++ b/get-started/best-practices/best-practices-for-harvard/index.html @@ -0,0 +1,5014 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Securing Your Public Facing Server

+

Overview

+

This document aims to provide you with a few concrete actions you can take to significantly enhance the security of your devices. This advice can be applied even if your servers are not public facing. However, we strongly recommend implementing these steps if your servers are intended to be accessible to the internet at large.

+

All recommendations and guidance are informed by our policy, which has specific requirements; the current policy/requirements for servers at NERC can be found here.

+
+

Harvard University Security Policy Information

+

Please note that all assets deployed to your NERC project must be compliant with University Security policies. Please familiarize yourself with the Harvard University Information Security Policy and your role in securing data. If you have any questions about how Security should be implemented in the Cloud, please contact your school security officer: "Harvard Security Officer".

+
+

Know Your Data

+

Depending on the data that exists on your servers, you may have to take added or +specific steps to safeguard that data. At Harvard, we developed a scale of data +classification ranging from 1 to 5 in order of increasing data sensitivity.

+

We have prepared added guidance with examples for both +Administrative Data +and Research Data.

+

Additionally, if your work involves individuals situated in the European Economic Area, you may be subject to the requirements of the General Data Protection Regulation, and more information about your responsibilities can be found here.

+

Host Protection

+

The primary focus of this guide is to provide you with security essentials that +we support and that you can implement with little effort.

+

Endpoint Protection

+

Harvard University uses the endpoint protection service Crowdstrike, which actively checks a machine for indications of malicious activity and will act to both block the activity and remediate the issue. This service is offered free to our community members and requires the installation of an agent on the server that runs transparently. This software enables the Harvard security team to review security events and act as needed.

+

Crowdstrike can be downloaded from our repository at agents.itsec.harvard.edu. This software is required for all devices owned by Harvard staff/faculty and is available for all operating systems.

+
+

Please note

+

To access this repository you need to be on the Harvard Campus Network.

+
+

Patch/Update Regularly

+

Vendors/developers commonly announce that they have discovered a new vulnerability in software you may be using. Many of these vulnerabilities are addressed by new releases that the developer issues. Keeping your software and server operating system up to date with current versions ensures that you are using a version of the software that does not have any known/published vulnerabilities.

+

Vulnerability Management

+

Various software versions have historically been found to be vulnerable to specific +attacks and exploits. The risk of running older versions of software is that you +may be exposing your machine to a possible known method of attack.

+

To assess which attacks you might be vulnerable to and be provided with specific +remediation guidance, we recommend enrolling your servers with our Tenable service +which periodically scans the software on your server and correlates the software +information with a database of published vulnerabilities. This service will enable +you to prioritize which component you need to upgrade or otherwise define which +vulnerabilities you may be exposed to.

+

The Tenable agent runs transparently and can be enabled to work according to the parameters set for your school; the agent can be downloaded here and configuration support can be found by filing a support request via the HUIT support ticketing system: ServiceNow.

+

Safer Applications/Development

+

Every application has its own unique operational constraints/requirements, and the advice below cannot be comprehensive; however, we can offer a few general recommendations.

+

Secure Credential Management

+

Credentials should not be kept on the server, nor should they be included directly +in your programming logic.

+

Attackers often review running code on the server to see if they can obtain any sensitive credentials that may have been included in each script. To better manage your credentials, we recommend using one of the options listed below (a short sketch follows the list):

+ +
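Whichever option you choose, the goal is for the application to read secrets at runtime rather than having them written into the code. A minimal Python sketch, assuming a hypothetical DB_PASSWORD variable that your credential store or deployment tooling injects into the environment:

```python
import os
import sys

# Read the credential from the environment at runtime instead of
# hard-coding it in the script or committing it to version control.
db_password = os.environ.get("DB_PASSWORD")

if db_password is None:
    # Fail fast with a clear message if the secret was not injected.
    sys.exit("DB_PASSWORD is not set; refusing to start.")

# ... use db_password when opening your database connection ...
```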

Not Running the Application as the Root/Superuser

+

Frequently an application needs special permissions and access and often it is +easiest to run an application in the root/superuser account. This is a dangerous +practice since the application, when compromised, gives attackers an account with +full administrative privileges. Instead, configuring the application to run with +an account with only the permissions it needs to run is a way to minimize the +impact of a given compromise.

+

Safer Networking

+

The goal in safer networking is to minimize the areas that an attacker can target.

+

Minimize Publicly Exposed Services

+

Every port/service open to the internet will be scanned by attackers attempting to access your servers. We recommend that any service/port that does not need to be accessed by the public be placed behind the campus firewall. This will significantly reduce the number of attempts by attackers to compromise your servers.

+

In practice this usually means that you only expose ports 80/443, which enables you to serve websites, while you keep all other services such as SSH, WordPress logins, etc. behind the campus firewall.

+

Strengthen SSH Logins

+

Where possible, and if needed, logins to a Harvard service should be placed behind HarvardKey. For researchers, however, the preferred login method is usually SSH, and we recommend the following ways to strengthen your SSH accounts (a consolidated configuration sketch follows this list):

+
    +
  1. +

    Disable password only logins

    +
      +
    • +

      In file /etc/ssh/sshd_config change PasswordAuthentication to no to + disable tunneled clear text passwords i.e. PasswordAuthentication no.

      +
    • +
    • +

      Uncomment the permit empty passwords option in the second line, and, if + needed, change yes to no i.e. PermitEmptyPasswords no.

      +
    • +
    • +

      Then run service ssh restart.

      +
    • +
    +
  2. +
  3. +

    Use SSH keys with passwords enabled on them

    +
  4. +
  5. +

    If possible, enroll the SSH service with a Two-factor authentication provider + such as DUO or YubiKey.

    +
  6. +
+
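As a consolidated sketch of step 1 above, the relevant /etc/ssh/sshd_config directives look like the excerpt below; after editing the file, apply the change with service ssh restart as noted in the list (on some distributions the service is named sshd rather than ssh).

```
# /etc/ssh/sshd_config (excerpt)
PasswordAuthentication no
PermitEmptyPasswords no
```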

Attack Detection

+

Despite the best protection, a sophisticated attacker may still find a way to +compromise your servers and in those scenarios, we want to enhance your ability +to detect activity that may be suspicious.

+

Install Crowdstrike

+

As stated above, Crowdstrike is both an endpoint protection service and an endpoint detection service. This software understands activities that might be benign in isolation but, coupled with other actions on the device, may be indicative of a compromise. It also enables the quickest security response.

+

Crowdstrike can be downloaded from our repository at agents.itsec.harvard.edu. This software is needed for all devices owned by Harvard staff/faculty and is available for all operating systems.

+

Safeguard your System Logs

+

System logs record and track activity on your servers, including logins, installed applications, errors, and more.

+

Sophisticated attackers will try to delete these logs to frustrate investigations and prevent discovery of their attacks. To ensure that your logs remain accessible and available for review, we recommend that you configure your logs to be sent to a system separate from your servers. This can mean either sending logs to an external file storage repository or configuring a separate logging system using Splunk.

+

For help setting up logging please file a support request via our support +ticketing system: ServiceNow.

+

Escalating an Issue

+

There are several ways you can report a security issue, and they are all documented on the HUIT Internet Security and Data Privacy group site.

+

In the event you suspect a security issue has occurred or want someone to supply a security assessment, please feel free to reach out to the HUIT Internet Security and Data Privacy group, specifically the Operations & Engineering team.

+ +

Further References

+

https://policy.security.harvard.edu/all-servers

+

https://enterprisearchitecture.harvard.edu/security-minimal-viable-product-requirements-huit-hostedmanaged-server-instances

+

https://policy.security.harvard.edu/security-requirements

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/best-practices/best-practices-for-my-institution/index.html b/get-started/best-practices/best-practices-for-my-institution/index.html new file mode 100644 index 00000000..27a84cf4 --- /dev/null +++ b/get-started/best-practices/best-practices-for-my-institution/index.html @@ -0,0 +1,4522 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Best Practices for My Institution

+

Institutions with the Best Practices outlines

+

The following institutions using our services have already provided guidelines +for best practices:

+
    +
  1. +

    Harvard University

    +
  2. +
  3. +

    Boston University

    +
  4. +
+
+

Upcoming Best Practices for other institutions

+

We are in the process of obtaining Best Practices for institutions not listed +above.

+
+

If your institution has already outlined Best Practices guidelines with your internal IT department, please contact us so we can list them here, by emailing us at help@nerc.mghpcc.org or by submitting a new ticket at the NERC's Support Ticketing System.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/best-practices/best-practices/index.html b/get-started/best-practices/best-practices/index.html new file mode 100644 index 00000000..31f73071 --- /dev/null +++ b/get-started/best-practices/best-practices/index.html @@ -0,0 +1,4511 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Best Practices for the NERC Users

+

By 2025, according to Gartner's forecast, the responsibility for approximately 99% of cloud security failures will likely lie with customers. These failures can be attributed to the difficulties in gauging and overseeing risks associated with on-prem cloud security. The MGHPCC will enter into a lightweight Memorandum of Understanding (MOU) with each institutional customer that consumes NERC services, and that MOU will also clearly explain the security risks and some of the shared responsibilities for the customers while using the NERC. This ensures roles and responsibilities are distinctly understood by each party.

+

NERC Principal Investigators (PIs): PIs are ultimately responsible for their +end-users and the security of the systems and applications that are deployed as +part of their project(s) on NERC. This includes being responsible for the security +of their data hosted on the NERC as well as users, accounts and access management.

+

Every individual user needs to comply with your Institution’s Security +and Privacy policies to protect +their Data, Endpoints, Accounts and Access management. They +must ensure any data created on or uploaded to the NERC is adequately secured. +Each customer has complete control over their systems, networks and assets. It +is essential to restrict access to the NERC provided user environment only to +authorized users by using secure identity and access management. Furthermore, +users have authority over various credential-related aspects, including secure +login mechanisms, single sign-on (SSO), and multifactor authentication.

+

Under this model, we are responsible for operating the physical infrastructure, which includes responsibility for protecting, patching, and maintaining the underlying virtualization layer, servers, disks, storage, network gear, and other hardware and software. NERC users, in turn, are responsible for the security of the guest operating system (OS) and the software stack (i.e. databases) used to run their applications and data. They are also entrusted with safeguarding middleware, containers, workloads, and any code or data generated by the platform.

+

All NERC users are responsible for their use of NERC services, which include:

+
    +
  • +

    Following the best practices for security on NERC services. Please review your + institutional guidelines next.

    +
  • +
  • +

    Complying with security policies regarding VMs and containers. NERC admins are + not responsible for maintaining or deploying VMs or containers created by PIs + for their projects. See Harvard University and Boston University policies + here. We will be adding more + institutions under this page soon. Without prior notice, NERC reserves the right + to shut down any VM or container that is causing internal or external problems + or violating these policies.

    +
  • +
  • +

Adhering to institutional restrictions and compliance policies around the data they upload and provide access to/from NERC. At NERC, we only support storing internal data, that is, information that is kept confidential but whose disclosure would not cause material harm to you, your users, or your institution. Your institution may have already classified and categorized data and implemented security policies and guidance for each category. If your project includes sensitive data and information, then you might need to contact NERC's admin as soon as possible to discuss other potential options.

    +
  • +
  • +

    Backups and/or snapshots + are the user's responsibility for volumes/data, configurations, objects, and + their state, which are useful in the case when users accidentally delete/lose + their data. NERC admins cannot recover lost data. In addition, while NERC stores + data with high redundancy to deal with computer or disk failures, PIs should + ensure they have off-site backups for disaster recovery, e.g., to deal with + occasional disruptions and outages due to the natural disasters that impact the + MGHPCC data center.

    +
  • +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/cost-billing/billing-faqs/index.html b/get-started/cost-billing/billing-faqs/index.html new file mode 100644 index 00000000..61146939 --- /dev/null +++ b/get-started/cost-billing/billing-faqs/index.html @@ -0,0 +1,4640 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Billing Frequently Asked Questions (FAQs)

+

Our primary focus is to deliver outstanding on-prem cloud services, prioritizing +reliability, security, and cutting-edge solutions to meet your research and teaching +requirements. To achieve this, we have implemented a cost-effective pricing model +that enables us to maintain, enhance, and sustain the quality of our services. By +adopting consistent cost structures across all institutions, we can make strategic +investments in infrastructure, expand our service portfolio, and enhance our +support capabilities for a seamless user experience.

+

Most of the institutions using our services have an MOU (Memorandum Of Understanding) with us to be better aligned with a number of research regulations, policies, and requirements. If your institution does not have an MOU with us, please have someone from your faculty or administration contact us to discuss it soon by emailing us at help@nerc.mghpcc.org or by submitting a new ticket at the NERC's Support Ticketing System.

+

Questions & Answers

+
+1. As a new NERC PI for the first time, am I entitled to any credits? +
    +
  • +

    Yes, you will receive up to $1000 of credit for the first month only.

    +
  • +
  • +

    This credit is not transferable to subsequent months.

    +
  • +
  • +

    This does not apply to the usage of GPU resources.

    +
  • +
+
+
+2. How often will I be billed? +

You or your institution will be billed monthly within the first week of each +month.

+
+
+3. If I have an issue with my bill, who do I contact? +

Please send your requests by emailing us at +help@nerc.mghpcc.org +or, by submitting a new ticket at the NERC's Support Ticketing System.

+
+
+4. How do I control costs? +

Upon creating a project, you will set these resource limits (quotas) for +OpenStack (VMs), OpenShift (containers), and storage through +ColdFront. This is the maximum +amount of resources you can consume at one time.

+
+
+5. Are we invoicing for CPUs/GPUs only when the VM or Pod is active? +

Yes. You will only be billed based on your utilization (cores, memory, GPU) +when VMs exist (even if they are Stopped!) or when pods are running. +Utilization will be translated into billable Service Units (SUs).

+

Persistent storage related to an OpenStack VM or OpenShift Pod will continue +to be billed even when the VM is stopped or the Pod is not running.

+
+
+6. Am I going to incur costs for allocations after end date? +

Currently, a project will continue to be able to utilize allocations even after their "End Date", resulting in ongoing costs for you. Such allocations will be marked as "Active (Needs Renewal)". In the future, we plan to change this behavior so that, after the end date, allocations will prevent associated VMs/pods from starting and may cause active VMs/pods to cease running.

+
+
+7. Are VMs invoiced even when shut down? +

Yes, as long as VMs are using resources they are invoiced. In order not to be +billed for a VM you must delete +the Instance/VM. It is a good idea to create a snapshot of your VM +prior to deleting it.

+
+
+8. Will OpenStack & OpenShift show on a single invoice? +

Yes. In the near future customers of NERC will be able to view per project service +utilization via the XDMoD tool.

+
+
+9. What happens when a Flavor is expanded during the month? +

a. Flavors cannot be expanded.

+

b. You can create a snapshot of an existing VM/Instance and, with that snapshot, +deploy a new flavor of VM/Instance.

+
+
+10. Is storage charged separately? +

Yes, but on the same invoice. To learn more, see our page on Storage.

+
+
+11. Will I be charged for storage attached to shut-off instances? +

Yes.

+
+
+12. Are we Invoicing Storage using ColdFront Requests or resource usage? +

a. Storage is invoiced based on Coldfront Requests.

+

b. When you request additional storage through Coldfront, invoicing on that +additional storage will occur when your request is fulfilled. When you request +a decrease in storage through +Request change using ColdFront, +your invoicing will adjust accordingly when your request is made. In both cases +'invoicing' means 'accumulate hours for whatever storage quantity was added +or removed'.

+

For example:

+
    +
  1. +

    I request an increase in storage, the request is approved and processed.

    +
      +
    • At this point we start Invoicing.
    • +
    +
  2. +
  3. +

    I request a decrease in storage.

    +
      +
    • The invoicing for that storage stops immediately.
    • +
    +
  4. +
+
+
+13. For OpenShift, what values are we using to track CPU & Memory? +

a. For invoicing we utilize requests.cpu for tracking CPU utilization & +requests.memory for tracking memory utilization.

+

b. Utilization will be capped based on the limits you set in ColdFront for +your resource allocations.

+
+
+14. If a single Pod exceeds the resources for a GPU SU, how is it invoiced? +

It will be invoiced as 2 or more GPU SUs, depending on how many multiples of the resources it exceeds.

+
+
+15. How often will we change the pricing? +

a. Our current plan is no more than once a year for existing offerings.

+

b. Additional offerings may be added throughout the year (i.e. new types of +hardware or storage).

+
+
+16. Is there any NERC Pricing Calculator? +

Yes. Start your estimate with no commitment based on your resource needs by +using this online tool. For more information about how to use this tool, see +How to use the NERC Pricing Calculator.

+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/cost-billing/billing-process-for-bu/index.html b/get-started/cost-billing/billing-process-for-bu/index.html new file mode 100644 index 00000000..11a7474c --- /dev/null +++ b/get-started/cost-billing/billing-process-for-bu/index.html @@ -0,0 +1,4520 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Billing Process for Boston University

+

Boston University has elected to receive a centralized invoice for its university +investigators and their designated user’s use of NERC services. IS&T will then +internally recover the cost from investigators. The process for cost recovery is +currently being implemented, and we will reach out to investigators once the process +is complete to obtain internal funding information to process your monthly bill.

+

Subsidization of Boston University’s Use of NERC

+

Boston University will subsidize a portion of NERC usage by its investigators. +The University will subsidize $100 per month of an investigator’s total usage on +NERC, regardless of the number of NERC projects an investigator has established. +Monthly subsidies cannot be carried over to subsequent months. The subsidized +amount and method are subject to change, and any adjustments will be conveyed +directly to investigators and updated on this page.

+

Please direct any questions about BU's billing process to us by emailing help@nerc.mghpcc.org or submitting a new ticket to the NERC's Support Ticketing System. Questions about a specific invoice that you have received can be sent to IST-ISR-NERC@bu.edu.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/cost-billing/billing-process-for-harvard/index.html b/get-started/cost-billing/billing-process-for-harvard/index.html new file mode 100644 index 00000000..fc60d151 --- /dev/null +++ b/get-started/cost-billing/billing-process-for-harvard/index.html @@ -0,0 +1,4544 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Billing Process for Harvard University

+

Direct Billing for NERC is a convenience service for Harvard Faculty and Departments. HUIT will pay the monthly invoices and then allocate the monthly usage costs on the Harvard University General Ledger. This follows a similar pattern to how other Public Cloud Provider (AWS, Azure, GCP) accounts are billed and leverages the HUIT Central Billing Portal. Your HUIT Customer Code will be matched to your NERC Project Allocation Name as a Billing Asset. In this process you will be asked for your GL billing code, which you can change as needed per project. Please be cognizant that only a single billing code is allowed per billing asset. Therefore, if you have multiple funds, please create a separate project for each fund if you are able. Otherwise, you will need to take care of this with internal journals inside of your department or lab. During each monthly billing cycle, the NERC team will upload the billing Comma-separated values (CSV) files to the HUIT Central Billing system's accessible AWS Object Storage (S3) bucket. The HUIT Central Billing system ingests billing data files provided by NERC, maps the usage costs to HUIT Billing customers (and GL Codes), and then includes those amounts in HUIT Monthly Billing of all customers. This is an automated process.

+

Please follow these two steps to ensure proper billing setup:

+
    +
  1. +

    Each Harvard PI must have a HUIT billing account linked to their NetID (abc123), + and NERC requires a HUIT "Customer Code" for billing purposes. To create + a HUIT billing account, sign up here + with your HarvardKey. The PI's submission of the corresponding HUIT + "Customer Code" is now seamlessly integrated into the PI user account role + submission process. This means that PIs can provide the corresponding HUIT + "Customer Code" either while submitting NERC's PI Request Form + or by submitting a new ticket at NERC's Support Ticketing System + under the "NERC PI Account Request" option in the Help Topic dropdown menu.

    +
    +

    What if you already have an existing Customer Code?

    +

    Please note that if you already have an existing active NERC account, you + need to provide your HUIT Customer Code to NERC. If you think your department + may already have a HUIT account but you don’t know the corresponding Customer + Code then you can contact HUIT Billing + to get the required Customer Code.

    +
    +
  2. +
  3. +

    During the Resource Allocation review and approval process, we will utilize the + HUIT "Customer Code" provided by the PI in step #1 to align it with the approved + allocation. Before confirming the mapping of the Customer Code to the Resource + Allocation, we will send an email to the PI to confirm its accuracy and then + approve the requested allocation. Subsequently, after the allocation is approved, + we will request the PI to initiate a change request + to input the correct "Customer Code" into the allocation's "Institution-Specific + Code" attribute's value.

    +
    +

    Very Important Note

    +

    We recommend keeping your "Institution-Specific Code" updated at all +times, ensuring it accurately reflects your current and valid Customer +Code. The PI or project manager(s) have the authority to request changes +for updating the "Institution-Specific Code" attribute for each resource +allocation. They can do so by submitting a Change Request as outlined here.

    +
    +

    How to view Project Name, Project ID & Institution-Specific Code?

    +

    By clicking on the Allocation detail page through ColdFront, you can access +information about the allocation of each resource, including OpenStack and +OpenShift as described here. +You can review and verify Allocated Project Name, Allocated Project +ID and Institution-Specific Code attributes, which are located under +the "Allocation Attributes" section on the detail page as +described here.

    +
    +
    +

    Once we confirm the six-digit HUIT Customer Code for the PI and the correct +resource allocation, the NERC admin team will initiate the creation of a new +ServiceNow ticket. This will be done by reaching out to +HUIT Billing +or directly emailing HUIT Billing at huit-billing@harvard.edu +for the approved and active allocation request.

    +

    In this email, the NERC admin needs to specify the Allocated Project ID, +Allocated Project Name, Customer Code, and PI's Email address. +Then, the HUIT billing team will generate a unique Asset ID to be utilized +by the Customer's HUIT billing portal.

    +
    +

    Important Information regarding HUIT Billing SLA

    +

    Please note that we will require the PI or Manager(s) to repeat step #2 +for any new resource allocation(s) as well as renewed allocation(s). +Additionally, the HUIT Billing SLA for new Cloud Billing assets is 2 +business days, although most requests are typically completed within +8 hours.

    +
    +
    +

    Harvard University Security Policy Information

    +

Please note that all assets deployed to your NERC project must be compliant with University Security policies as described here. Please familiarize yourself with the Harvard University Information Security Policy and your role in securing data. If you have any questions about how Security should be implemented in the Cloud, please contact your school security officer: "Harvard Security Officer".

    +
    +
  4. +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/cost-billing/billing-process-for-my-institution/index.html b/get-started/cost-billing/billing-process-for-my-institution/index.html new file mode 100644 index 00000000..f7df5494 --- /dev/null +++ b/get-started/cost-billing/billing-process-for-my-institution/index.html @@ -0,0 +1,4559 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Billing Process for My Institution

+

Memorandum of Understanding (MOU)

+

The New England Research Cloud (NERC) is a shared service offered through the +Massachusetts Green High Performance Computing Center (MGHPCC). The MGHPCC will +enter into a lightweight Memorandum of Understanding (MOU) with each institutional +customer that consumes NERC services. The MOU is intended to ensure the institution +maintains access to valuable and relevant cloud services provided by the MGHPCC +via the NERC to be better aligned to a number of research regulations, policies, +and requirements and also ensure NERC remains sustainable over time.

+

Institutions with established MOUs and Billing Processes

+

For cost recovery purposes, institutional customers may elect to receive one invoice for the usage of NERC services by their PIs and recover the costs internally. Every month, the NERC team will export, back up, and securely store the billing data for all PIs in the form of comma-separated values (CSV) files and provide it to the MGHPCC for billing purposes.

+

The following institutions using our services have established MOU as well as +billing processes with us:

+
    +
  1. +

    Harvard University

    +
  2. +
  3. +

    Boston University

    +
  4. +
+
+

Upcoming MOU with other institutions

+

We are in the process of establishing MOUs for institutions not listed above.

+
+

PIs from other institutions not listed above can still utilize NERC services with the understanding that they are directly accountable for managing their usage and ensuring all service charges are paid promptly. If you have any common questions or need further information, see our Billing FAQs for comprehensive answers.

+

If your institution does not have an MOU with us, please have someone from your +faculty or administration contact us to discuss it soon by emailing us at +help@nerc.mghpcc.org +or, by submitting a new ticket at the NERC's Support Ticketing System.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/cost-billing/how-pricing-works/index.html b/get-started/cost-billing/how-pricing-works/index.html new file mode 100644 index 00000000..f321b9c8 --- /dev/null +++ b/get-started/cost-billing/how-pricing-works/index.html @@ -0,0 +1,4876 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

How does NERC pricing work?

+
+

As a new PI using NERC for the first time, am I entitled to any credits?

+

As a new PI using NERC for the first time, you might wonder if you get any +credits. Yes, you'll receive up to $1000 for the first month only. But +remember, this credit can not be used in the following months. Also, +it does not apply to GPU resource usage.

+
+

NERC offers you a pay-as-you-go approach for pricing for our cloud infrastructure +offerings (Tiers of Service), including Infrastructure-as-a-Service (IaaS) – Red +Hat OpenStack and Platform-as-a-Service (PaaS) – Red Hat OpenShift. The exception +is the Storage quotas in NERC Storage Tiers, where the cost is determined by +your requested and approved allocation values +to reserve storage from the total NESE storage pool. For NERC (OpenStack) +Resource Allocations, storage quotas are specified by the "OpenStack Volume Quota +(GiB)" and "OpenStack Swift Quota (GiB)" allocation attributes. Whereas for +NERC-OCP (OpenShift) Resource Allocations, storage quotas are specified by the +"OpenShift Request on Storage Quota (GiB)" and "OpenShift Limit on Ephemeral Storage +Quota (GiB)" allocation attributes. If you have common questions or need more +information, refer to our Billing FAQs for comprehensive answers. +NERC offers a flexible cost model where an institution (with a per-project breakdown) +is billed solely for the duration of the specific services required. Access is based +on project-approved resource quotas, eliminating runaway usage and charges. There +are no obligations of long-term contracts or complicated licensing agreements. +Each institution will enter a lightweight MOU with MGHPCC that defines the services +and billing model.

+

Calculations

+

Service Units (SUs)

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Name | vGPU | vCPU | RAM (GiB) | Current Price
H100 GPU | 1 | 64 | 384 | $6.04
A100sxm4 GPU | 1 | 32 | 240 | $2.078
A100 GPU | 1 | 24 | 74 | $1.803
V100 GPU | 1 | 48 | 192 | $1.214
K80 GPU | 1 | 6 | 28.5 | $0.463
CPU | 0 | 1 | 4 | $0.013
+
+

Expected Availability of H100 GPUs

+

H100 GPUs will be available in early 2025.

+
+

Breakdown

+

CPU/GPU SUs

+

Service Units (SUs) can only be purchased as a whole unit. We will charge for +Pods (summed up by Project) and VMs on a per-hour basis for any portion of an +hour they are used, and any VM "flavor"/Pod reservation is charged as a multiplier +of the base SU for the maximum resource they reserve.

+

GPU SU Example:

+
    +
  • +

    A Project or VM with:

    +

    1 A100 GPU, 24 vCPUs, 95MiB RAM, 199.2hrs

    +
  • +
  • +

    Will be charged:

    +

    1 A100 GPU SUs x 200hrs (199.2 rounded up) x $1.803

    +

    $360.60

    +
  • +
+

OpenStack CPU SU Example:

+
    +
  • +

    A Project or VM with:

    +

    3 vCPU, 20 GiB RAM, 720hrs (24hr x 30days)

    +
  • +
  • +

    Will be charged:

    +

    5 CPU SUs due to the extra RAM (20GiB vs. 12GiB(3 x 4GiB)) x 720hrs x $0.013

    +

    $46.80

    +
  • +
+
+
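A minimal Python sketch of the CPU SU arithmetic in the example above; this is an unofficial illustration that assumes 1 vCPU and 4 GiB RAM per CPU SU (per the SU table) and whole-hour rounding:

```python
import math

CPU_SU_RATE = 0.013      # dollars per CPU SU per hour
RAM_GIB_PER_CPU_SU = 4   # each CPU SU includes 4 GiB of RAM

def openstack_cpu_cost(vcpus: int, ram_gib: float, hours: float) -> float:
    """Cost of a VM billed as whole CPU SUs for the hours it exists."""
    # The VM is charged as a multiplier of the base SU for the maximum
    # resource it reserves: CPU-driven or RAM-driven, whichever is larger.
    sus = max(vcpus, math.ceil(ram_gib / RAM_GIB_PER_CPU_SU))
    return sus * math.ceil(hours) * CPU_SU_RATE

# Example from above: 3 vCPU, 20 GiB RAM, 720 hrs -> 5 SUs -> $46.80
print(f"${openstack_cpu_cost(3, 20, 720):.2f}")
```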

Are VMs invoiced even when shut down?

+

Yes, VMs are invoiced as long as they are utilizing resources. In order not +to be billed for a VM, you must delete +your Instance/VM. It is advisable to create a snapshot +of your VM prior to deleting it, ensuring you have a backup of your data and +configurations. By proactively managing your VMs and resources, you can +optimize your usage and minimize unnecessary costs.

+

If you have common questions or need more information, refer to our +Billing FAQs for comprehensive +answers.

+
+

OpenShift CPU SU Example:

+
    +
  • +

    Project with 3 Pods with:

    +

    i. 1 vCPU, 3 GiB RAM, 720hrs (24hr*30days)

    +

    ii. 0.1 vCPU, 8 GiB RAM, 720hrs (24hr*30days)

    +

    iii. 2 vCPU, 4 GiB RAM, 720hrs (24hr*30days)

    +
  • +
  • +

    Project Will be charged:

    +

    RoundUP(Sum(

    +

    1 CPU SUs due to first pod * 720hrs * $0.013

    +

    2 CPU SUs due to extra RAM (8GiB vs 0.4GiB(0.1*4GiB)) * 720hrs * $0.013

    +

    2 CPU SUs due to more CPU (2vCPU vs 1vCPU(4GiB/4)) * 720hrs * $0.013

    +

    ))

    +

    =RoundUP(Sum(720(1+2+2)))*0.013

    +

    $46.80

    +
  • +
+
+

How to calculate cost for all running OpenShift pods?

+

If you prefer a function for the OpenShift pods here it is:

+

Project SU HR count = RoundUP(SUM(Pod1 SU hour count + Pod2 SU hr count + +...))

+
+

OpenShift Pods are summed up to the project level so that fractions of CPU/RAM +that some pods use will not get overcharged. There will be a split between CPU and +GPU pods, as GPU pods cannot currently share resources with CPU pods.

+
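A minimal Python sketch of that project-level roll-up; it is an unofficial illustration assuming, as in the example above, 1 vCPU and 4 GiB RAM per CPU SU and the $0.013 CPU SU rate:

```python
import math

CPU_SU_RATE = 0.013      # dollars per CPU SU per hour
RAM_GIB_PER_CPU_SU = 4   # each CPU SU includes 4 GiB of RAM

def pod_su(cpu_request: float, ram_request_gib: float) -> float:
    """SUs for one pod: whichever of CPU or RAM demands more SUs."""
    return max(cpu_request, ram_request_gib / RAM_GIB_PER_CPU_SU)

def project_cpu_cost(pods: list[tuple[float, float, float]]) -> float:
    """pods: (cpu_request, ram_request_gib, hours); summed, then rounded up."""
    su_hours = sum(pod_su(cpu, ram) * hours for cpu, ram, hours in pods)
    return math.ceil(su_hours) * CPU_SU_RATE

# The three pods from the example above, each running 720 hours -> $46.80
pods = [(1, 3, 720), (0.1, 8, 720), (2, 4, 720)]
print(f"${project_cpu_cost(pods):.2f}")
```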

Storage

+

Storage is charged separately at a rate of $0.009 TiB/hr or $9.00E-6 GiB/hr. OpenStack volumes remain provisioned until they are deleted. VMs reserve volumes, and you can also create extra volumes yourself. In OpenShift, pod storage is only provisioned while the pod is active, while persistent volumes remain provisioned until they are deleted.

+
+

Very Important: Requested/Approved Allocated Storage Quota and Cost

+

The Storage cost is determined by your requested and approved allocation values. Once approved, these Storage quotas will need to be reserved from the total NESE storage pool for both NERC (OpenStack) and NERC-OCP (OpenShift) resources. For NERC (OpenStack) Resource Allocations, storage quotas are specified by the "OpenStack Volume Quota (GiB)" and "OpenStack Swift Quota (GiB)" allocation attributes. Whereas for NERC-OCP (OpenShift) Resource Allocations, storage quotas are specified by the "OpenShift Request on Storage Quota (GiB)" and "OpenShift Limit on Ephemeral Storage Quota (GiB)" allocation attributes.

+

Even if you have deleted all volumes, snapshots, and object storage buckets and objects in your OpenStack and OpenShift projects, it is essential to adjust the approved values for your NERC (OpenStack) and NERC-OCP (OpenShift) resource allocations to zero (0); otherwise, you will still be incurring a charge for the approved storage as explained in Billing FAQs.

+

Keep in mind that you can easily scale and expand your current resource +allocations within your project. Follow this guide +on how to use NERC's ColdFront to reduce your Storage quotas for NERC (OpenStack) +allocations and this guide +for NERC-OCP (OpenShift) allocations.

+
+

Storage Example 1:

+
    +
  • +

    Volume or VM with:

    +

    500GiB for 699.2hrs

    +
  • +
  • +

    Will be charged:

    +

    .5 Storage TiB SU (.5 TiB x 700hrs) x $0.009 TiB/hr

    +

    $3.15

    +
  • +
+

Storage Example 2:

+
    +
  • +

    Volume or VM with:

    +

    10TiB for 720hrs (24hr x 30days)

    +
  • +
  • +

    Will be charged:

    +

    10 Storage TiB SU (10TiB x 720 hrs) x $0.009 TiB/hr

    +

    $64.80

    +
  • +
+

Storage includes all types of storage: Object, Block, Ephemeral & Image.

+
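A small Python sketch of the storage arithmetic above, an unofficial illustration assuming the $0.009 TiB/hr rate and the whole-hour rounding used in the examples:

```python
import math

STORAGE_RATE_TIB_HR = 0.009  # dollars per TiB per hour

def storage_cost(tib_provisioned: float, hours: float) -> float:
    """Cost of provisioned storage, charged per TiB-hour, hours rounded up."""
    return tib_provisioned * math.ceil(hours) * STORAGE_RATE_TIB_HR

# Example 1 above: 500 GiB (0.5 TiB) for 699.2 hrs -> 700 hrs -> $3.15
print(f"${storage_cost(0.5, 699.2):.2f}")
# Example 2 above: 10 TiB for 720 hrs -> $64.80
print(f"${storage_cost(10, 720):.2f}")
```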

High-Level Function

+

To provide a more practical way to calculate your usage, here is a function of +how the calculation works for OpenShift and OpenStack.

+
    +
  1. +

    OpenStack = (Resource (vCPU/RAM/vGPU) assigned to VM flavor converted to + number of equivalent SUs) * (time VM has been running), rounded up to a whole + hour + Extra storage.

    +
    +

    NERC's OpenStack Flavor List

    +

    You can find the most up-to-date information on the current NERC's OpenStack +flavors with corresponding SUs by referring to this page.

    +
    +
  2. +
  3. +

    OpenShift = (Resource (vCPU/RAM) requested by Pod converted to the number of SUs) * (time Pod was running), summed up to the project level and rounded up to the whole hour (see the sketch after this list).

    +
  4. +
+
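As a rough illustration of the OpenStack formula above, the shell function below combines the SU count, runtime, and extra storage into one estimate. This is a minimal sketch, not an official billing tool: the $0.013 CPU SU rate and $0.009 TiB/hr storage rate are taken from the examples in this document, and the input values are placeholders you would replace with your own flavor and allocation figures.

    # Minimal sketch: estimate an OpenStack VM's cost from its SU count,
    # hours run, and extra storage in TiB.
    estimate_openstack_cost() {
      local su_count=$1 hours=$2 extra_storage_tib=$3
      local compute storage
      compute=$(echo "$su_count * $hours * 0.013" | bc -l)          # CPU SU rate
      storage=$(echo "$extra_storage_tib * $hours * 0.009" | bc -l) # storage rate
      echo "$compute + $storage" | bc -l
    }

    # Example: a 4-SU flavor running 720 hrs with 1 TiB of extra storage.
    printf 'Estimated cost: $%.2f\n' "$(estimate_openstack_cost 4 720 1)"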

How to Pay?

+

To ensure a comprehensive understanding of the billing process and payment options +for NERC offerings, we advise PIs/Managers to visit individual pages designated +for each institution. These pages provide +detailed information specific to each organization's policies and procedures +regarding their billing. By exploring these dedicated pages, you can gain insights +into the preferred payment methods, invoicing cycles, breakdowns of cost components, +and any available discounts or offers. Understanding the institution's unique +approach to billing ensures accurate planning, effective financial management, +and a transparent relationship with us.

+

If you have any questions or need further information, see our Billing FAQs for comprehensive answers.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/cost-billing/images/cost-estimator-bottom-sheets.png b/get-started/cost-billing/images/cost-estimator-bottom-sheets.png new file mode 100644 index 00000000..976b497d Binary files /dev/null and b/get-started/cost-billing/images/cost-estimator-bottom-sheets.png differ diff --git a/get-started/cost-billing/images/su.png b/get-started/cost-billing/images/su.png new file mode 100644 index 00000000..31c7da53 Binary files /dev/null and b/get-started/cost-billing/images/su.png differ diff --git a/get-started/cost-billing/nerc-pricing-calculator/index.html b/get-started/cost-billing/nerc-pricing-calculator/index.html new file mode 100644 index 00000000..3f29ef4a --- /dev/null +++ b/get-started/cost-billing/nerc-pricing-calculator/index.html @@ -0,0 +1,4476 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

NERC Pricing Calculator

+

The NERC Pricing Calculator is a Google Sheets based tool for estimating the cost of utilizing various NERC resources in different NERC service offerings. It offers a user-friendly interface, allowing users to input their requirements and customize configurations to generate accurate and tailored cost estimates for optimal budgeting and resource allocation.

+

Start your estimate with no commitment, and explore NERC services and pricing for +your research needs by using this online tool.

+
+

How to use the NERC Pricing Calculator?

+

Please note: you need to make a copy of this tool before estimating the cost. Once copied, you can easily update the corresponding resource type columns' values in your own working sheet, which will reflect your potential Service Units (SU), Rate, and cost per Hour, Month, and Year. This tool has 4 sheets at the bottom, as shown here:
Estimator Available Sheets
If you would like to calculate your cost estimates based on the available NERC OpenStack flavors (which define the compute, memory, and storage capacity of your dedicated instances), you can select and use the second sheet, titled "OpenStack Flavor". For cost estimates of the NERC OpenShift resources, use the first sheet, titled "OpenShift SU", and input pod-specific resource requests in each row. If you are scaling a pod to more than one replica, enter a new row for each scaled pod. For Storage costs, use the third sheet, titled "Calculate Storage". The total cost will then be reflected in the last sheet, titled "Total Cost".

+
+

For more information about how NERC pricing works, see How does NERC pricing work, and to learn more about the billing process for your own institution, see Billing Process for My Institution.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/create-a-user-portal-account/index.html b/get-started/create-a-user-portal-account/index.html new file mode 100644 index 00000000..cad62c4e --- /dev/null +++ b/get-started/create-a-user-portal-account/index.html @@ -0,0 +1,4665 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

User Account Types

+

NERC offers two types of user accounts: a Principal Investigator (PI) Account +and a General User Account. All General Users must be assigned to their project +by an active NERC PI or by one of the delegated project manager(s), as +described here. Then, those project +users can be added to the resource allocation during a new allocation request or +at a later time.

+
+

Principal Investigator Eligibility Information

+
    +
  • +

    MGHPCC consortium members, who enter into a service agreement with MGHPCC for the NERC services.

    +
  • +
  • +

    Non-members of MGHPCC can also be PIs of NERC Services, but must also have +an active non-member agreement with MGHPCC.

    +
  • +
  • +

    External research-focused institutions will be considered on a case-by-case basis and are subject to an external customer cost structure.

    +
  • +
+
+

A PI account can request allocations of NERC resources, grant access to other general users enabling them to log into NERC's computational project space, and delegate its responsibilities to other collaborators from the same institution or elsewhere as managers using NERC's ColdFront interface, as described here.

+

Getting Started

+

Any faculty, staff, student, and external collaborator must request a user account +through the MGHPCC Shared Services (MGHPCC-SS) Account Portal, +also known as "RegApp". This is a web-based, single point-of-entry to the NERC +system that displays a user welcome page. The welcome page of the account +registration site displays instructions on how to register a General User +account on NERC, as shown in the image below:

+

MGHPCC Shared Services (MGHPCC-SS) Account Portal Welcome Page

+

There are two options: either register for a new account or manage an existing one. If you are new to NERC and want to register as a new MGHPCC-SS user, click on the "Register for an Account" button. This will redirect you to a new web page with details about how to register for a new MGHPCC-SS user account. NERC uses CILogon, which supports login using your institutional or commercial identity provider (IdP).

+

Clicking the "Begin MGHPCC-SS Account Creation Process" button will initiate the +account creation process. You will be redirected to a site managed by CILogon +where you will select your institutional or commercial identity provider, as +shown below:

+

CILogon Page

+

Once selected, you will be redirected to your institutional or commercial identity +provider, where you will log in, as shown here:

+

Institutional IdP Login Page

+

After a successful logon, your browser will be redirected back to the MGHPCC-SS Registration Page, where you will be asked to review and confirm the fetched account information to complete the account creation process.

+

User Account Review Before Creation Page

+
+

Very Important

+

If you don't click the "Create MGHPCC-SS Account" button, your account will not be created, so this is a very important step. Review your information carefully, make any corrections you need, and fill in any blank or missing fields such as "Research Domain". Please read the End User Level Agreement (EULA) and accept the terms by checking the checkbox in this form. Then click the "Create MGHPCC-SS Account" button to save your information.

+
+

Once you have reviewed and verified that all your user information in this form +is correct, only then click the "Create MGHPCC-SS Account" button. This will +automatically send an email to your email address with a link to validate and +confirm your account information.

+

User Account Email Verification Page

+

Once you receive an "MGHPCC-SS Account Creation Validation" email, review your +user account information to ensure it is correct. Then, click on the provided +validation web link and enter the unique account creation Confirmation Code +provided in the email as shown below:

+

MGHPCC-SS Account Creation Validation

+

Once validated, you need to ensure that your user account is created and valid +by viewing the following page:

+

Successful Account Validation Page

+
+

Important Note

+

If you have an institutional identity, it's preferable to use that identity +to create your MGHPCC-SS account. Institutional identities are vetted by identity +management teams and provide a higher level of confidence to resource owners +when granting access to resources. You can only link one university account +to an MGHPCC-SS account; if you have multiple university accounts, you will +only be able to link one of those accounts to your MGHPCC-SS account. If, at +a later date, you want to change which account is connected to your MGHPCC-SS +identity, you can do so by contacting help@mghpcc.org.

+
+

How to update and modify your MGHPCC-SS account information?

+
    +
  1. +

    Log in to the RegApp using your MGHPCC-SS account.

    +
  2. +
  3. +

    Click on "Manage Your MGHPCC-SS Account" button as shown below:

    +

    MGHPCC-SS Account Update

    +
  4. +
  5. +

    Review your currently saved account information, make any necessary corrections + or updates to fields, and then click on the "Update MGHPCC-SS Account" button.

    +
  6. +
  7. +

    This will send an email to verify your updated account information, so please + check your email address.

    +
  8. +
  9. +

    Confirm and validate the new account details by clicking the provided validation + web link and entering the unique Confirmation Code provided in the email + as shown below:

    +

    MGHPCC-SS Account Update Validation

    +
  10. +
+

How to request a Principal Investigator (PI) Account?

+

The process for requesting and obtaining a PI Account is relatively simple. +You can fill out this NERC Principal Investigator (PI) Account Request form +to initiate the process.

+

Alternatively, users can request a Principal Investigator (PI) user account +by submitting a new ticket at the NERC's Support Ticketing System +under the "NERC PI Account Request" option in the Help Topic dropdown menu, +as shown in the image below:

+

the NERC's Support Ticketing System PI Ticket

+
+

Information

+

Once your PI user request is reviewed and approved by the NERC's admin, you +will receive an email confirmation from NERC's support system, i.e., +help@nerc.mghpcc.org. +Then, you can access NERC's ColdFront resource allocation management portal +using the PI user role, as described here.

+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/get-started/images/CILogon.png b/get-started/images/CILogon.png new file mode 100644 index 00000000..8931dd9d Binary files /dev/null and b/get-started/images/CILogon.png differ diff --git a/get-started/images/account-email-verification-page.png b/get-started/images/account-email-verification-page.png new file mode 100644 index 00000000..84c8826f Binary files /dev/null and b/get-started/images/account-email-verification-page.png differ diff --git a/get-started/images/account_creation_confirmation.png b/get-started/images/account_creation_confirmation.png new file mode 100644 index 00000000..58ddbb02 Binary files /dev/null and b/get-started/images/account_creation_confirmation.png differ diff --git a/get-started/images/account_update.png b/get-started/images/account_update.png new file mode 100644 index 00000000..89af616d Binary files /dev/null and b/get-started/images/account_update.png differ diff --git a/get-started/images/account_update_confirmation.png b/get-started/images/account_update_confirmation.png new file mode 100644 index 00000000..ffbd9125 Binary files /dev/null and b/get-started/images/account_update_confirmation.png differ diff --git a/get-started/images/institutional_idp.png b/get-started/images/institutional_idp.png new file mode 100644 index 00000000..84cb4d94 Binary files /dev/null and b/get-started/images/institutional_idp.png differ diff --git a/get-started/images/osticket-pi-request.png b/get-started/images/osticket-pi-request.png new file mode 100644 index 00000000..d10919ba Binary files /dev/null and b/get-started/images/osticket-pi-request.png differ diff --git a/get-started/images/regapp-welcome-page.png b/get-started/images/regapp-welcome-page.png new file mode 100644 index 00000000..039a3582 Binary files /dev/null and b/get-started/images/regapp-welcome-page.png differ diff --git a/get-started/images/successful-account-validation.png b/get-started/images/successful-account-validation.png new file mode 100644 index 00000000..cb62ccfd Binary files /dev/null and b/get-started/images/successful-account-validation.png differ diff --git a/get-started/images/user-account-review-page.png b/get-started/images/user-account-review-page.png new file mode 100644 index 00000000..e03e6854 Binary files /dev/null and b/get-started/images/user-account-review-page.png differ diff --git a/get-started/images/user-flow-NERC.png b/get-started/images/user-flow-NERC.png new file mode 100644 index 00000000..711bf855 Binary files /dev/null and b/get-started/images/user-flow-NERC.png differ diff --git a/get-started/user-onboarding-on-NERC/index.html b/get-started/user-onboarding-on-NERC/index.html new file mode 100644 index 00000000..32ae2ce0 --- /dev/null +++ b/get-started/user-onboarding-on-NERC/index.html @@ -0,0 +1,4509 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

User Onboarding Process Overview

+

NERC's Research allocations are available to faculty members and researchers, including +postdoctoral researchers and students. In order to get access to resources provided +by NERC's computational infrastructure, you must first register and obtain a user +account.

+

The overall user flow can be summarized using the following sequence diagram:

+

NERC user flow

+
    +
  1. +

    All users, including PIs, need to register to NERC via: https://regapp.mss.mghpcc.org/.

    +
  2. +
  3. +

    PI will send a request for a Principal Investigator (PI) user account role + by submitting: NERC's PI Request Form.

    +

    Alternatively, users can request a Principal Investigator (PI) user account +by submitting a new ticket at the NERC's Support Ticketing System +under the "NERC PI Account Request" option in the Help Topic dropdown menu, +as shown in the image below:

    +

    the NERC's Support Ticketing System PI Ticket

    +
    +

    Principal Investigator Eligibility Information

    +
      +
    • +

      MGHPCC consortium members, who enter into a service agreement with MGHPCC for the NERC services.

      +
    • +
    • +

      Non-members of MGHPCC can also be PIs of NERC Services, but must also have an active non-member agreement with MGHPCC.

      +
    • +
    • +

      External research-focused institutions will be considered on a case-by-case basis and are subject to an external customer cost structure.

      +
    • +
    +
    +
  4. +
  5. +

    Wait until the PI request gets approved by the NERC's admin.

    +
  6. +
  7. +

    Once a PI request is approved, the PI can add a new project and also search for and add user(s) to the project. Other general user(s) can also see the project(s) once they are added to a project via: https://coldfront.mss.mghpcc.org.

    +
  8. +
  9. +

    The PI or project Manager can request a resource allocation, either NERC (OpenStack) or NERC-OCP (OpenShift), for the newly added project and select which user(s) can use the requested allocation.

    +
    +

    As a new NERC PI for the first time, am I entitled to any credits?

    +

    As a new PI using NERC for the first time, you might wonder if you get any credits. Yes, you'll receive up to $1000 of credit for the first month only. But remember, this credit cannot be used in the following months, and it does not apply to GPU resource usage.

    +
    +
  10. +
  11. +

    Wait until the requested resource allocation gets approved by the NERC's admin.

    +
  12. +
  13. +

    Once approved, PI and the corresponding project users can go to either + NERC Openstack horizon web interface: https://stack.nerc.mghpcc.org + or NERC OpenShift web console: https://console.apps.shift.nerc.mghpcc.org + based on approved Resource Type and they can start using the NERC's resources + based on the approved project quotas.

    +
  14. +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/images/NERC-Diagram-MOC.png b/images/NERC-Diagram-MOC.png new file mode 100644 index 00000000..8bc1f88f Binary files /dev/null and b/images/NERC-Diagram-MOC.png differ diff --git a/index.html b/index.html new file mode 100644 index 00000000..1da291a4 --- /dev/null +++ b/index.html @@ -0,0 +1,4448 @@ + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

NERC Technical Documentation

+
+

NERC welcomes your contributions

+

These pages are hosted from a +git repository and +contributions +are welcome!

+

Fork this repo

+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/javascripts/extra.js b/javascripts/extra.js new file mode 100644 index 00000000..e69de29b diff --git a/migration-moc-to-nerc/Step1/index.html b/migration-moc-to-nerc/Step1/index.html new file mode 100644 index 00000000..441487d2 --- /dev/null +++ b/migration-moc-to-nerc/Step1/index.html @@ -0,0 +1,4617 @@ + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Creating NERC Project and Networks

+

This process includes some waiting for emails and approvals. It is advised to start this process and then move to Step 2, and to continue with these steps once you receive approval.

+

Account Creation & Quota Request

+
    +
  1. +

    Register for your new NERC account + here.

    +
      +
    1. Wait for an approval email.
    2. +
    +
  2. +
  3. +

    Register to be a PI for a NERC account + here.

    +
      +
    1. Wait for an approval email.
    2. +
    +
  4. +
  5. +

    Request the quota necessary for all of your MOC Projects to be added + to NERC here + (link also in PI approval email).

    +

    ColdFront_Login

    +
      +
    1. +

      Log in with your institution login by clicking on + Log in via OpenID Connect (highlighted in yellow above).

      +

      ColdFront_Projects

      +
    2. +
    3. +

      Under Projects>> Click on the name of your project + (highlighted in yellow above).

      +

      ColdFront_Projects

      +
    4. +
    5. +

      Scroll down until you see Request Resource Allocation + (highlighted in yellow above) and click on it.

      +

      ColdFront_Allocation

      +
    6. +
    7. +

      Fill out the Justification (highlighted in purple above) for + the quota allocation.

      +
    8. +
    9. +

      Using the “MOC Instance Information” table you gathered from your MOC project, calculate the total number of Instances, vCPUs, and RAM, and use your “MOC Volume Information” table to calculate the Disk space you will need.

      +
    10. +
    11. +

      Using the up and down arrows (highlighted in yellow above), or by entering the number manually, select the multiple of 1 Instance, 2 vCPUs, 0 GPUs, 4GB RAM, 2 Volumes, 100GB Disk, and 1GB Object Storage that you will need.

      +
        +
      1. For example, if I need 2 instances, 2 vCPUs, 3GB RAM, 3 Volumes, and 30GB of storage, I would type in 2 or click the up arrow once to select 2 units.
      2. +
      +
    12. +
    13. +

      Click Submit (highlighted in green above).

      +
    14. +
    +
  6. +
  7. +

    Wait for your allocation approval email.

    +
  8. +
+

Setup

+

Login to the Dashboard

+
    +
  1. +

    Log into the + NERC OpenStack Dashboard + using your OpenID Connect password.

    +

    Dashboard_Login

    +
      +
    1. +

      Click Connect.

      +

      Dashboard_Login_CILogon

      +
    2. +
    3. +

      Select your institution from the drop down (highlighted in yellow + above).

      +
    4. +
    5. +

      Click Log On (highlighted in purple).

      +
    6. +
    7. +

      Follow your institution's log on instructions.

      +
    8. +
    +
  2. +
+

Setup NERC Network

+
    +
  1. +

    You are then brought to the Project>Compute>Overview location of + the Dashboard.

    +

    Project_Comp_Overview

    +
      +
    1. +

      This will look very familiar as the MOC and NERC Dashboard are quite + similar.

      +
    2. +
    3. +

      Follow the instructions + here + to set up your network/s (you may also use the default_network + if you wish).

      +
        +
      1. The networks don't have to exactly match the MOC. You only need the + networks for creating your new instances (and accessing them once we + complete the migration).
      2. +
      +
    4. +
    5. +

      Follow the instructions + here + to set up your router/s (you may also use the default_router if you wish).

      +
    6. +
    7. +

      Follow the instructions + here + to set up your Security Group/s.

      +
        +
      1. This is where you can use your “MOC Security Group Information” + table to create similar Security Groups to the ones you had in the MOC.
      2. +
      +
    8. +
    9. +

      Follow the instructions + here + to set up your SSH Key-pair/s.

      +
    10. +
    +
  2. +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/migration-moc-to-nerc/Step2/index.html b/migration-moc-to-nerc/Step2/index.html new file mode 100644 index 00000000..37e2f77f --- /dev/null +++ b/migration-moc-to-nerc/Step2/index.html @@ -0,0 +1,4890 @@ + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Identify Volumes, Instances & Security Groups on the MOC that need to be Migrated to the NERC

+

Please read the instructions in their entirety before proceeding. +Allow yourself enough time to complete them.

+

Volume Snapshots will not be migrated. If you have a Snapshot you wish to back up, please “Create Volume” from it first.

+

Confirm Access and Login to MOC Dashboard

+
    +
  1. Go to the MOC Dashboard.
  2. +
+

SSO / Google Login

+
    +
  1. +

    If you have SSO through your Institution or Google, select Institution Account from the dropdown.

    +

    Login1

    +
  2. +
  3. +

    Click Connect.

    +
  4. +
  5. +

    Click on University Logins (highlighted in yellow below) + if you are using SSO with your Institution.

    +

    Login2

    +
      +
    1. Follow your Institution's login steps after that, and skip to + Gathering MOC information for the + Migration.
    2. +
    +
  6. +
  7. +

    Click Google (highlighted in purple above) if your SSO + is through Google.

    +
      +
    1. Follow standard Google login steps to get in this + way, and skip to Gathering MOC information for the + Migration.
    2. +
    +
  8. +
+

Keystone Credentials

+
    +
  1. +

    If you have a standard login and password leave the dropdown + as Keystone Credentials.

    +

    Login3

    +
  2. +
  3. +

    Enter your User Name.

    +
  4. +
  5. +

    Enter your Password.

    +
  6. +
  7. +

    Click Connect.

    +
  8. +
+

Don't know your login?

+
    +
  1. +

    If you do not know your login information please create a + Password Reset ticket.

    +

    OSticket1

    +
  2. +
  3. +

    Click Open a New Ticket (highlighted in yellow above).

    +

    OSticket2

    +
  4. +
  5. +

    Click the dropdown and select Forgot Pass & SSO Account + Link (highlighted in blue above).

    +
  6. +
  7. +

    In the text field (highlighted in purple above), provide your Institution email, the project you are working on, and the email address you used to create the account.

    +
  8. +
  9. +

    Click Create Ticket (highlighted in yellow above) and + wait for the pinwheel.

    +
  10. +
  11. +

    You will receive an email to let you know that the MOC support + staff will get back to you.

    +
  12. +
+

Gathering MOC information for the Migration

+
    +
  1. +

    You are then brought to the Project>Compute>Overview location of the + Dashboard.

    +

    Project_Compute_Instance

    +
  2. +
+

Create Tables to hold your information

+

Create 3 tables listing all of your Instances, Volumes, and Security Groups. For example, if you have 2 instances, 3 volumes, and 2 Security Groups like the samples below, your lists might look like this:

+

MOC Instance Information Table

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Instance Name | MOC VCPUs | MOC Disk | MOC RAM | MOC UUID |
|---|---|---|---|---|
| Fedora_test | 1 | 10GB | 1GB | 16a1bfc2-8c90-4361-8c13-64ab40bb6207 |
| Ubuntu_Test | 1 | 10GB | 2GB | 6a40079a-59f7-407c-9e66-23bc5b749a95 |
| total | 2 | 20GB | 3GB | |
+

MOC Volume Information Table

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| MOC Volume Name | MOC Disk | MOC Attached To | Bootable | MOC UUID | NERC Volume Name |
|---|---|---|---|---|---|
| Fedora | 10GiB | Fedora_test | Yes | ea45c20b-434a-4c41-8bc6-f48256fc76a8 | |
| 9c73295d-fdfa-4544-b8b8-a876cc0a1e86 | 10GiB | Ubuntu_Test | Yes | 9c73295d-fdfa-4544-b8b8-a876cc0a1e86 | |
| Snapshot of Fed_Test | 10GiB | Fedora_test | No | ea45c20b-434a-4c41-8bc6-f48256fc76a8 | |
| total | 30GiB | | | | |
+

MOC Security Group Information Table

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
| Security Group Name | Direction | Ether Type | IP Protocol | Port Range | Remote IP Prefix |
|---|---|---|---|---|---|
| ssh_only_test | Ingress | IPv4 | TCP | 22 | 0.0.0.0/0 |
| ping_only_test | Ingress | IPv4 | ICMP | Any | 0.0.0.0/0 |
+

Gather the Instance Information

+

Gather the Instance UUIDs (of only the instances that you need to migrate +to the NERC).

+
    +
  1. +

    Click + Instances + (highlighted in pink in image above)

    +

    Project_Instance_Name

    +
  2. +
  3. +

    Click the Instance Name (highlighted in Yellow above) of the first + instance you would like to gather data on.

    +

    Project_Inst_Details

    +
  4. +
  5. +

    Locate the ID row (highlighted in green above) and copy and save the ID + (highlighted in purple above).

    +
      +
    1. This is the UUID of your first Instance.
    2. +
    +
  6. +
  7. +

    Locate the RAM, VCPUs & Disk rows (highlighted in yellow) and copy and + save the associated values (highlighted in pink).

    +
  8. +
  9. +

    Repeat this section for each + Instance you have.

    +
  10. +
+
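If you are comfortable with the OpenStack command line, the same instance details can optionally be gathered with commands like the ones below instead of the dashboard steps above. This assumes you have a working CLI configuration for the MOC cloud (for example, the "moc" clouds.yaml entry created in Step 3); <InstanceName> and <FlavorName> are placeholders for your own values.

    # List instance IDs, names, and flavors for the project.
    openstack --os-cloud moc server list -c ID -c Name -c Flavor

    # Show the UUID and flavor of a single instance.
    openstack --os-cloud moc server show <InstanceName> -c id -c flavor

    # Look up the vCPU, RAM, and disk values of that flavor.
    openstack --os-cloud moc flavor show <FlavorName> -c vcpus -c ram -c disk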

Gather the Volume Information

+

Gather the Volume UUIDs (of only the volumes that you need to migrate +to the NERC).

+

Project_Volumes_Volumes

+
    +
  1. +

    Click Volumes dropdown.

    +
  2. +
  3. +

    Select Volumes + (highlighted in purple above).

    +

    Project_Volumes_Names

    +
  4. +
  5. +

    Click the Volume Name (highlighted in yellow above) of the first + volume you would like to gather data on.

    +
      +
    1. +

      The name might be the same as the ID (highlighted in blue above).

      +

      Project_Volumes_Details

      +
    2. +
    +
  6. +
  7. +

    Locate the ID row (highlighted in green above) and copy and save the ID + (highlighted in purple above).

    +
      +
    1. This is the UUID of your first Volume.
    2. +
    +
  8. +
  9. +

    Locate the Size row (highlighted in yellow above) and copy and save + the Volume size (highlighted in pink above).

    +
  10. +
  11. +

    Locate the Bootable row (highlighted in gray above) and copy and save the Bootable value (highlighted in red above).

    +
  12. +
  13. +

    Locate the Attached To row (highlighted in blue above) and copy and save + the Instance this Volume is attached to (highlighted in orange above).

    +
      +
    1. If the volume is not attached to an instance, it will state “Not attached”.
    2. +
    +
  14. +
  15. +

    Repeat this section for each Volume + you have.

    +
  16. +
+
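As with instances, the volume details can optionally be collected from the OpenStack CLI rather than the dashboard. This assumes the same "moc" CLI configuration as noted above; <VolumeNameOrID> is a placeholder.

    # List all volumes with size, status, and attachment information.
    openstack --os-cloud moc volume list --long

    # Show the UUID, size, bootable flag, and attachments of one volume.
    openstack --os-cloud moc volume show <VolumeNameOrID> -c id -c size -c bootable -c attachments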

Gather your Security Group Information

+

If you already have all of your Security Group information outside of the OpenStack Dashboard, you can skip this section.

+

Gather the Security Group information (of only the security groups that you +need to migrate to the NERC).

+

Project_Network_SecGroup

+
    +
  1. +

    Click Network dropdown

    +
  2. +
  3. +

    Click + Security + Groups (highlighted in yellow above).

    +

    Ntwrk_ScGrp_Names

    +
  4. +
  5. +

    Click Manage Rules (highlighted in yellow above) of the first + Security Group you would like to gather data on.

    +

    Ntwrk_SGp_Detal

    +
  6. +
  7. +

    Ignore the first 2 lines (highlighted in yellow above).

    +
  8. +
  9. +

    Write down the important information for all lines after (highlighted in + blue above).

    +
      +
    1. Direction, Ether Type, IP Protocol, Port Range, Remote IP Prefix, + Remote Security Group.
    2. +
    +
  10. +
  11. +

    Repeat this section + for each security group you have.

    +
  12. +
+
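The same security group rules can also be listed from the OpenStack CLI, which can be easier to copy into your table. This is an optional alternative that assumes the "moc" CLI configuration; <SecurityGroupName> is a placeholder.

    # List your security groups.
    openstack --os-cloud moc security group list

    # List the rules (direction, ethertype, protocol, port range, remote prefix)
    # of one security group.
    openstack --os-cloud moc security group rule list <SecurityGroupName> --long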
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/migration-moc-to-nerc/Step3/index.html b/migration-moc-to-nerc/Step3/index.html new file mode 100644 index 00000000..94d33a29 --- /dev/null +++ b/migration-moc-to-nerc/Step3/index.html @@ -0,0 +1,5093 @@ + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Steps to Migrate Volumes from MOC to NERC

+

Create a spreadsheet to track the values you will need

+
    +
  1. +

    The values you will want to keep track of are.

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    | Label | Value |
    |---|---|
    | MOCAccess | |
    | MOCSecret | |
    | NERCAccess | |
    | NERCSecret | |
    | MOCEndPoint | https://kzn-swift.massopen.cloud |
    | NERCEndPoint | https://stack.nerc.mghpcc.org:13808 |
    | MinIOVolume | |
    | MOCVolumeBackupID | |
    | ContainerName | |
    | NERCVolumeBackupID | |
    | NERCVolumeName | |
    +
  2. +
  3. +

    It is also helpful to have a text editor open so that you can insert + the values from the spreadsheet into the commands that need to be run.

    +
  4. +
+

Create a New MOC Mirror to NERC Instance

+
    +
  1. +

    Follow the instructions + here + to set up your instance.

    +

    Image Selection

    +
      +
    1. +

      When selecting the Image please select moc-nerc-migration + (highlighted in yellow above).

      +
    2. +
    3. +

      Once the Instance is Running, move on to the next step.

      +
    4. +
    +
  2. +
  3. +

    Name your new instance something you will remember, MirrorMOC2NERC + for example.

    +
  4. +
  5. +

    Assign a Floating IP to your new instance. If you need assistance please + review the Floating IP steps here.

    +
      +
    1. Your floating IPs will not be the same as the ones you had in the + MOC. Please claim new floating IPs to use.
    2. +
    +
  6. +
  7. +

    SSH into the MirrorMOC2NERC Instance. The user to use for login is centos. + If you have any trouble please review the SSH steps here.

    +
  8. +
+

Setup Application Credentials

+

Gather MOC Application Credentials

+
    +
  1. +

    Follow the instructions here to create your Application + Credentials.

    +
      +
    1. Make sure to save the clouds.yaml as clouds_MOC.yaml.
    2. +
    +
  2. +
+

Gathering NERC Application Credentials

+
    +
  1. +

    Follow the instructions under the header Command Line setup + here to create your Application Credentials.

    +
      +
    1. Make sure to save the clouds.yaml as clouds_NERC.yaml.
    2. +
    +
  2. +
+

Combine the two clouds.yaml files

+
    +
  1. +

    Make a copy of clouds_MOC.yaml and save as clouds.yaml

    +
  2. +
  3. +

    Open clouds.yaml in a text editor of your choice.

    +

    clouds.yaml MOC

    +
      +
    1. Change the openstack (highlighted in yellow above) value to moc + (highlighted in yellow two images below).
    2. +
    +
  4. +
  5. +

    Open clouds_NERC.yaml in a text editor of your choice.

    +

    clouds.yaml NERC

    +
      +
    1. +

      Change the openstack (highlighted in yellow above) value to nerc + (highlighted in green below).

      +
    2. +
    3. +

      Highlight and copy everything from nerc to the end of the line that + starts with auth_type

      +

      clouds.yaml Combined

      +
    4. +
    5. +

      Paste the copied text into clouds.yaml below the line that starts + with auth_type. Your new clouds.yaml will look similar to the image + above.

      +
    6. +
    +
  6. +
  7. +

    For further instructions on clouds.yaml files go + Here.

    +
  8. +
+
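For reference, a combined clouds.yaml typically ends up with one entry per cloud, along the lines of the sketch below. The auth_url, region, and credential values shown are placeholders, so always use the exact keys and values from the clouds_MOC.yaml and clouds_NERC.yaml files you downloaded rather than this example.

    # Sketch only: every <...> value below is a placeholder taken from the
    # downloaded clouds_MOC.yaml and clouds_NERC.yaml files; copy your real
    # values, do not paste these literally.
    cat > clouds.yaml <<'EOF'
    clouds:
      moc:
        auth:
          auth_url: <MOC auth URL from clouds_MOC.yaml>
          application_credential_id: <MOC application credential ID>
          application_credential_secret: <MOC application credential secret>
        region_name: <region from clouds_MOC.yaml>
        interface: public
        identity_api_version: 3
        auth_type: v3applicationcredential
      nerc:
        auth:
          auth_url: <NERC auth URL from clouds_NERC.yaml>
          application_credential_id: <NERC application credential ID>
          application_credential_secret: <NERC application credential secret>
        region_name: <region from clouds_NERC.yaml>
        interface: public
        identity_api_version: 3
        auth_type: v3applicationcredential
    EOF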

Moving Application Credentials to VM

+
    +
  1. +

    SSH into the VM created at the top of this page for example MirrorMOC2NERC.

    +
  2. +
  3. +

    Create the openstack config folder and empty clouds.yaml file.

    +
    mkdir -p ~/.config/openstack
    +cd ~/.config/openstack
    +touch clouds.yaml
    +
    +
  4. +
  5. +

    Open the clouds.yaml file in your favorite text editor. + (vi is preinstalled).

    +
  6. +
  7. +

    Copy the entire text inside the clouds.yaml file on your local computer.

    +
  8. +
  9. +

    Paste the contents of the local clouds.yaml file into the clouds.yaml + on the VM.

    +
  10. +
  11. +

    Save and exit your VM text editor.

    +
  12. +
+

Confirm the Instances are Shut Down

+
    +
  1. +

    Confirm the instances are Shut Down. This is a very important step + because we will be using the force modifier when we make our backup. The + volume can become corrupted if the Instance is not in a Shut Down state.

    +
  2. +
  3. +

    Log into the Instance page of the + MOC Dashboard

    +

    Instance Shutdown

    +
  4. +
  5. +

    Check that the Power State of all of the instances you plan to migrate volumes from is set to Shut Down (highlighted in yellow in the image above).

    +
      +
    1. +

      If they are not, shut them down from the Actions column.

      +

      Shut Off Instance

      +
        +
      1. +

        Click the drop down arrow under actions.

        +
      2. +
      3. +

        Select Shut Off Instance (blue arrow pointing to it in image + above).

        +
      4. +
      +
    2. +
    +
  6. +
+
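If you prefer, the instances can also be shut down and verified from the command line. This is an optional alternative to the dashboard steps above; it assumes the "moc" clouds.yaml entry from earlier, and <InstanceNameOrID> is a placeholder.

    # Shut the instance down before taking a backup of its volume.
    openstack --os-cloud moc server stop <InstanceNameOrID>

    # Confirm it is powered off; this should print SHUTOFF.
    openstack --os-cloud moc server show <InstanceNameOrID> -c status -f value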

Backup and Move Volume Data from MOC to NERC

+
    +
  1. SSH into the VM created at the top of this page. For steps on how to do + this please see instructions here.
  2. +
+

Create EC2 credentials in MOC & NERC

+
    +
  1. +

    Generate credentials for Kaizen with the command below.

    +
    openstack --os-cloud moc ec2 credentials create
    +
    +

    EC2 for MOC

    +
      +
    1. Copy the access (circled in red above) and secret (circled in blue + above) values into your table as <MOCAccess> and <MOCSecret>.
    2. +
    +
  2. +
  3. +

    Generate credentials for the NERC with the command below.

    +
    openstack --os-cloud nerc ec2 credentials create
    +
    +

    EC2 for NERC

    +
      +
    1. Copy the access (circled in red above) and secret (circled in blue above) values into your table as <NERCAccess> and <NERCSecret>.
    2. +
    +
  4. +
+

Find Object Store Endpoints

+
    +
  1. +

    Look up information on the object-store service in MOC with the command + below.

    +
    openstack --os-cloud moc catalog show object-store -c endpoints
    +
    +

    MOC URL

    +
      +
    1. If the value is different from https://kzn-swift.massopen.cloud, copy the base URL for this service (circled in red above).
    2. +
    +
  2. +
  3. +

    Look up information on the object-store service in NERC with the command + below.

    +
    openstack --os-cloud nerc catalog show object-store -c endpoints
    +
    +

    NERC URL

    +
      +
    1. If the value is different from https://stack.nerc.mghpcc.org:13808, copy the base URL for this service (circled in red above).
    2. +
    +
  4. +
+

Configure minio client aliases

+
    +
  1. +

    Create a MinIO alias for MOC using the base URL of the "public" + interface of the object-store service <MOCEndPoint> and the EC2 access key + (ex. <MOCAccess>) & secret key (ex. <MOCSecret>) from your table.

    +
    $ mc alias set moc https://kzn-swift.massopen.cloud <MOCAccess> <MOCSecret>
    +mc: Configuration written to `/home/centos/.mc/config.json`. Please update your access credentials.
    + mc: Successfully created `/home/centos/.mc/share`.
    +mc: Initialized share uploads `/home/centos/.mc/share/uploads.json` file.
    +mc: Initialized share downloads `/home/centos/.mc/share/downloads.json` file.
    +Added `moc` successfully.
    +
    +
  2. +
  3. +

    Create a MinIO alias for NERC using the base URL of the "public" + interface of the object-store service <NERCEndPoint> and the EC2 access key + (ex. <NERCAccess>) & secret key (ex. <NERCSecret>) from your table.

    +
    $ mc alias set nerc https://stack.nerc.mghpcc.org:13808 <NERCAccess> <NERCSecret>
    +Added `nerc` successfully.
    +
    +
  4. +
+

Backup MOC Volumes

+
    +
  1. +

    Locate the desired Volume UUID from the table you created in + Step 2 Gathering MOC Information.

    +
  2. +
  3. +

    Add the first Volume ID from your table to the command below in the <MOCVolumeID> field, and create a Container Name to replace the <ContainerName> field. The Container Name should be easy to remember as well as unique, so include your name; something like thomasa-backups.

    +
    openstack --os-cloud moc volume backup create --force --container <ContainerName> <MOCVolumeID>
    ++-------+---------------------+
    +| Field | Value               |
    ++-------+---------------------+
    +| id    | <MOCVolumeBackupID> |
    +| name  | None                |
    +
    +
      +
    1. Copy down your <MOCVolumeBackupID> to your table.
    2. +
    +
  4. +
  5. +

    Wait for the backup to become available. You can run the command below to check on the status. If your volume is 25 GiB or larger, this might be a good time to go get a warm beverage or lunch.

    +
    openstack --os-cloud moc volume backup list
    ++---------------------+------+-------------+-----------+------+
    +| ID                  | Name | Description | Status    | Size |
    ++---------------------+------+-------------+-----------+------+
    +| <MOCVolumeBackupID> | None | None        | creating  |   10 |
    +...
    +openstack --os-cloud moc volume backup list
    ++---------------------+------+-------------+-----------+------+
    +| ID                  | Name | Description | Status    | Size |
    ++---------------------+------+-------------+-----------+------+
    +| <MOCVolumeBackupID> | None | None        | available |   10 |
    +
    +
  6. +
+
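Optionally, instead of re-running the list command by hand, a small shell loop like the sketch below can poll the backup status every 30 seconds until it becomes available; <MOCVolumeBackupID> is the value from your table.

    # Note: if the status becomes "error", stop the loop and investigate.
    while true; do
      status=$(openstack --os-cloud moc volume backup show <MOCVolumeBackupID> -f value -c status)
      echo "backup status: $status"
      [ "$status" = "available" ] && break
      sleep 30
    done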

Gather MinIO Volume data

+
    +
  1. Get the volume information for future commands. Use the same + <ContainerName> from when you created the volume backup. It is worth + noting that this value shares the ID number with the VolumeID.
    $ mc ls moc/<ContainerName>
    +[2022-04-29 09:35:16 EDT]     0B <MinIOVolume>/
    +
    +
  2. +
+

Create a Container on NERC

+
    +
  1. Create the NERC container that we will send the volume to. Use + the same <ContainerName> from when you created the volume backup.
    $ mc mb nerc/<ContainerName>
    +Bucket created successfully `nerc/<ContainerName>`.
    +
    +
  2. +
+

Mirror the Volume from MOC to NERC

+
    +
  1. Using the volume label from MinIO <MinIOVolume> and the <ContainerName> + for the command below you will kick off the move of your volume. This takes + around 30 sec per GB of data in your volume.
    $ mc mirror moc/<ContainerName>/<MinIOVolume> nerc/<ContainerName>/<MinIOVolume>
    +...123a30e_sha256file:  2.61GB / 2.61GB [=========...=========] 42.15Mib/s 1m3s
    +
    +
  2. +
+

Copy the Backup Record from MOC to NERC

+
    +
  1. +

    Now that we've copied the backup data into the NERC environment, we need + to register the backup with the NERC backup service. We do this by copying + metadata from MOC. You will need the original <MOCVolumeBackupID> you used + to create the original Backup.

    +
    openstack --os-cloud moc volume backup record export -f value <MOCVolumeBackupID> > record.txt
    +
    +
  2. +
  3. +

    Next we will import the record into NERC.

    +
    openstack --os-cloud nerc volume backup record import -f value $(cat record.txt)
    +<NERCVolumeBackupID>
    +None
    +
    +
      +
    1. Copy <NERCVolumeBackupID> value into your table.
    2. +
    +
  4. +
+

Create an Empty Volume on NERC to Receive the Backup

+
    +
  1. Create a volume in the NERC environment to receive the backup. This must be the same size or larger than the original volume, which you can set by modifying the <size> field. Remove the "--bootable" flag if you are not creating a bootable volume. The <NERCVolumeName> field can be any name you want; I would suggest something that will help you keep track of which instance you want to attach it to. Make sure to fill in the table you created in Step 2 with the <NERCVolumeName> value in the NERC Volume Name column.
    openstack --os-cloud nerc volume create --bootable --size <size> <NERCVolumeName>
    ++---------------------+----------------+
    +| Field               | Value          |
    ++---------------------+----------------+
    +| attachments         | []             |
    +| availability_zone   | nova           |
    +...
    +| id                  | <NERCVolumeID> |
    +...
    +| size                | <size>         |
    ++---------------------+----------------+
    +
    +
  2. +
+

Restore the Backup

+
    +
  1. +

    Restore the Backup to the Volume you just created.

    +
    openstack --os-cloud nerc volume backup restore <NERCVolumeBackupID> <NERCVolumeName>
    +
    +
  2. +
  3. +

    Wait for the volume to shift from restoring-backup to available.

    +
    openstack --os-cloud nerc volume list
    ++----------------+------------+------------------+------+-------------+
    +| ID             | Name       | Status           | Size | Attached to |
    ++----------------+------------+------------------+------+-------------+
    +| <NERCVolumeID> | MOC Volume | restoring-backup |    3 | Migration   |
    +openstack --os-cloud nerc volume list
    ++----------------+------------+-----------+------+-------------+
    +| ID             | Name       | Status    | Size | Attached to |
    ++----------------+------------+-----------+------+-------------+
    +| <NERCVolumeID> | MOC Volume | available |    3 | Migration   |
    +
    +
  4. +
  5. +

    Repeat these Backup and Move Volume + Data + steps for each volume you need to migrate.

    +
  6. +
+

Create NERC Instances Using MOC Volumes

+
    +
  1. +

    If you have volumes that need to be attached to an instance please follow + the next steps.

    +
  2. +
  3. +

    Follow the instructions here to set up your instance/s.

    +
      +
    1. +

      Instead of using an Image for your Boot Source you will use a Volume + (orange arrow in image below).

      +

      Volume Selection

      +
    2. +
    3. +

      Select the <NERCVolumeName> you created in the step Create an Empty Volume on NERC to Receive the Backup.

      +
    4. +
    5. +

      The Flavor is important, as it decides how many vCPUs, how much RAM, and how much Disk this instance will consume of your total.

      +
        +
      1. If for some reason the earlier approved resource quota is not + sufficient you can request further quota by following + these steps.
      2. +
      +
    6. +
    +
  4. +
  5. +

    Repeat this section + for each instance you need to create.

    +
  6. +
+
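The same instance creation can optionally be done from the command line by booting directly from the restored volume. The flavor, network, key pair, and security group names below are placeholders for the resources you created in Step 1.

    openstack --os-cloud nerc server create \
      --volume <NERCVolumeName> \
      --flavor <FlavorName> \
      --network <NetworkName> \
      --key-name <KeyPairName> \
      --security-group <SecurityGroupName> \
      <NewInstanceName>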
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/migration-moc-to-nerc/Step4/index.html b/migration-moc-to-nerc/Step4/index.html new file mode 100644 index 00000000..08ee92d0 --- /dev/null +++ b/migration-moc-to-nerc/Step4/index.html @@ -0,0 +1,4595 @@ + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Remove Volume Backups to Conserve Storage

+

If you find yourself low on Volume Storage, please follow the steps below to remove your old Volume Backups. If you are very low on space, you can do this every time you finish copying a new volume to the NERC. If, on the other hand, you have plenty of remaining space, feel free to leave all of your Volume Backups as they are.

+
    +
  1. SSH into the MirrorMOC2NERC Instance. The user to use for + login is centos. If you have any trouble please review the SSH steps + here.
  2. +
+

Check Remaining MOC Volume Storage

+
    +
  1. +

    Log into the MOC Dashboard and go to Project > Compute > + Overview.

    +

    Volume Storage

    +
  2. +
  3. +

    Look at the Volume Storage meter (highlighted in yellow in image above).

    +
  4. +
+
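The same volume storage usage can optionally be checked from the command line; this assumes the "moc" clouds.yaml entry used throughout this guide.

    # Show block storage usage and quota figures (gigabytes, volumes, backups).
    openstack --os-cloud moc limits show --absolute | grep -i -E 'gigabytes|volumes|backup'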

Delete MOC Volume Backups

+
    +
  1. +

    Gather a list of current MOC Volume Backups with the command below.

    +
    openstack --os-cloud moc volume backup list
    ++---------------------+------+-------------+-----------+------+
    +| ID                  | Name | Description | Status    | Size |
    ++---------------------+------+-------------+-----------+------+
    +| <MOCVolumeBackupID> | None | None        | available |   10 |
    +
    +
  2. +
  3. +

    Only remove Volume Backups that you are sure have been moved to the NERC. With the command below, you can delete Volume Backups.

    +
    openstack --os-cloud moc volume backup delete <MOCVolumeBackupID>
    +
    +
  4. +
  5. +

    Repeat the MOC Volume Backup section for + all MOC Volume Backups you wish to remove.

    +
  6. +
+

Delete MOC Container <ContainerName>

+

Remove the container (i.e., <ContainerName>) that was created with a unique name on the MOC side during migration. Replace the <ContainerName> field with your own container name created during the migration process:

+
openstack --os-cloud moc container delete --recursive <ContainerName>
+
+

Verify the <ContainerName> is removed from MOC:

+
openstack --os-cloud moc container list
+
+

Check Remaining NERC Volume Storage

+
    +
  1. +

    Log into the NERC Dashboard and go to Project > Compute > + Overview.

    +

    Volume Storage

    +
  2. +
  3. +

    Look at the Volume Storage meter (highlighted in yellow in image above).

    +
  4. +
+

Delete NERC Volume Backups

+
    +
  1. +

    Gather a list of current NERC Volume Backups with the command below.

    +
    openstack --os-cloud nerc volume backup list
    ++---------------------+------+-------------+-----------+------+
    +| ID                  | Name | Description | Status    | Size |
    ++---------------------+------+-------------+-----------+------+
    +| <MOCVolumeBackupID> | None | None        | available |   3  |
    +
    +
  2. +
  3. +

    Only remove Volume Backups that you are sure have been migrated to NERC Volumes. Keep in mind that you might not have named the volume the same as on the MOC, so check your table from Step 2 to confirm. You can confirm which Volumes you have in NERC with the following command.

    +
    openstack --os-cloud nerc volume list
    ++----------------+------------------+--------+------+----------------------------------+
    +| ID             | Name             | Status | Size | Attached to                      |
    ++----------------+------------------+--------+------+----------------------------------+
    +| <NERCVolumeID> | <NERCVolumeName> | in-use |    3 | Attached to MOC2NERC on /dev/vda |
    +
    +
  4. +
  5. +

    To remove volume backups please use the command below.

    +
    openstack --os-cloud nerc volume backup delete <MOCVolumeBackupID>
    +
    +
  6. +
  7. +

    Repeat the NERC Volume Backup section for + all NERC Volume Backups you wish to remove.

    +
  8. +
+

Delete NERC Container <ContainerName>

+

Remove the container (i.e., <ContainerName>) that was created with a unique name on the NERC side during migration to mirror the Volume from MOC to NERC. Replace the <ContainerName> field with your own container name created during the migration process:

+
openstack --os-cloud nerc container delete --recursive <ContainerName>
+
+

Verify the <ContainerName> is removed from NERC:

+
openstack --os-cloud nerc container list
+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/migration-moc-to-nerc/images/S1_ColdFront_Allocation.png b/migration-moc-to-nerc/images/S1_ColdFront_Allocation.png new file mode 100644 index 00000000..f9bc4643 Binary files /dev/null and b/migration-moc-to-nerc/images/S1_ColdFront_Allocation.png differ diff --git a/migration-moc-to-nerc/images/S1_ColdFront_Login.png b/migration-moc-to-nerc/images/S1_ColdFront_Login.png new file mode 100644 index 00000000..ba0b3554 Binary files /dev/null and b/migration-moc-to-nerc/images/S1_ColdFront_Login.png differ diff --git a/migration-moc-to-nerc/images/S1_ColdFront_ManageProject.png b/migration-moc-to-nerc/images/S1_ColdFront_ManageProject.png new file mode 100644 index 00000000..0864ea53 Binary files /dev/null and b/migration-moc-to-nerc/images/S1_ColdFront_ManageProject.png differ diff --git a/migration-moc-to-nerc/images/S1_ColdFront_Projects.png b/migration-moc-to-nerc/images/S1_ColdFront_Projects.png new file mode 100644 index 00000000..4d07beb1 Binary files /dev/null and b/migration-moc-to-nerc/images/S1_ColdFront_Projects.png differ diff --git a/migration-moc-to-nerc/images/S1_Dashboard_Instance.png b/migration-moc-to-nerc/images/S1_Dashboard_Instance.png new file mode 100644 index 00000000..aa447f2d Binary files /dev/null and b/migration-moc-to-nerc/images/S1_Dashboard_Instance.png differ diff --git a/migration-moc-to-nerc/images/S1_Dashboard_Instance_Details.png b/migration-moc-to-nerc/images/S1_Dashboard_Instance_Details.png new file mode 100644 index 00000000..cfed4be7 Binary files /dev/null and b/migration-moc-to-nerc/images/S1_Dashboard_Instance_Details.png differ diff --git a/migration-moc-to-nerc/images/S1_Dashboard_Instance_Name.png b/migration-moc-to-nerc/images/S1_Dashboard_Instance_Name.png new file mode 100644 index 00000000..11c5344f Binary files /dev/null and b/migration-moc-to-nerc/images/S1_Dashboard_Instance_Name.png differ diff --git a/migration-moc-to-nerc/images/S1_Dashboard_Login.png b/migration-moc-to-nerc/images/S1_Dashboard_Login.png new file mode 100644 index 00000000..403c7ea5 Binary files /dev/null and b/migration-moc-to-nerc/images/S1_Dashboard_Login.png differ diff --git a/migration-moc-to-nerc/images/S1_Dashboard_Login_CILogon.png b/migration-moc-to-nerc/images/S1_Dashboard_Login_CILogon.png new file mode 100644 index 00000000..2b20c694 Binary files /dev/null and b/migration-moc-to-nerc/images/S1_Dashboard_Login_CILogon.png differ diff --git a/migration-moc-to-nerc/images/S1_Dashboard_Project_Compute_Overview.png b/migration-moc-to-nerc/images/S1_Dashboard_Project_Compute_Overview.png new file mode 100644 index 00000000..aca160fe Binary files /dev/null and b/migration-moc-to-nerc/images/S1_Dashboard_Project_Compute_Overview.png differ diff --git a/migration-moc-to-nerc/images/S1_Dashboard_Project_VolumeBootable1.png b/migration-moc-to-nerc/images/S1_Dashboard_Project_VolumeBootable1.png new file mode 100644 index 00000000..e8e97881 Binary files /dev/null and b/migration-moc-to-nerc/images/S1_Dashboard_Project_VolumeBootable1.png differ diff --git a/migration-moc-to-nerc/images/S1_Dashboard_Project_VolumeBootable2.png b/migration-moc-to-nerc/images/S1_Dashboard_Project_VolumeBootable2.png new file mode 100644 index 00000000..5d3c9352 Binary files /dev/null and b/migration-moc-to-nerc/images/S1_Dashboard_Project_VolumeBootable2.png differ diff --git a/migration-moc-to-nerc/images/S1_Dashboard_Volume.png b/migration-moc-to-nerc/images/S1_Dashboard_Volume.png new file mode 100644 index 
00000000..76e7995e Binary files /dev/null and b/migration-moc-to-nerc/images/S1_Dashboard_Volume.png differ diff --git a/migration-moc-to-nerc/images/S1_Dashboard_Volume_Details.png b/migration-moc-to-nerc/images/S1_Dashboard_Volume_Details.png new file mode 100644 index 00000000..541e8c7d Binary files /dev/null and b/migration-moc-to-nerc/images/S1_Dashboard_Volume_Details.png differ diff --git a/migration-moc-to-nerc/images/S1_Dashboard_Volume_Name.png b/migration-moc-to-nerc/images/S1_Dashboard_Volume_Name.png new file mode 100644 index 00000000..181d6f6d Binary files /dev/null and b/migration-moc-to-nerc/images/S1_Dashboard_Volume_Name.png differ diff --git a/migration-moc-to-nerc/images/S2_Login1.png b/migration-moc-to-nerc/images/S2_Login1.png new file mode 100644 index 00000000..eb39ce09 Binary files /dev/null and b/migration-moc-to-nerc/images/S2_Login1.png differ diff --git a/migration-moc-to-nerc/images/S2_Login2.png b/migration-moc-to-nerc/images/S2_Login2.png new file mode 100644 index 00000000..e75f654c Binary files /dev/null and b/migration-moc-to-nerc/images/S2_Login2.png differ diff --git a/migration-moc-to-nerc/images/S2_Login3.png b/migration-moc-to-nerc/images/S2_Login3.png new file mode 100644 index 00000000..70c9cd95 Binary files /dev/null and b/migration-moc-to-nerc/images/S2_Login3.png differ diff --git a/migration-moc-to-nerc/images/S2_OSticket1.png b/migration-moc-to-nerc/images/S2_OSticket1.png new file mode 100644 index 00000000..a5183c02 Binary files /dev/null and b/migration-moc-to-nerc/images/S2_OSticket1.png differ diff --git a/migration-moc-to-nerc/images/S2_OSticket2.png b/migration-moc-to-nerc/images/S2_OSticket2.png new file mode 100644 index 00000000..ee3217af Binary files /dev/null and b/migration-moc-to-nerc/images/S2_OSticket2.png differ diff --git a/migration-moc-to-nerc/images/S2_Project_Compute_Instance.png b/migration-moc-to-nerc/images/S2_Project_Compute_Instance.png new file mode 100644 index 00000000..65d3ad8f Binary files /dev/null and b/migration-moc-to-nerc/images/S2_Project_Compute_Instance.png differ diff --git a/migration-moc-to-nerc/images/S2_Project_Compute_Instance_Details.png b/migration-moc-to-nerc/images/S2_Project_Compute_Instance_Details.png new file mode 100644 index 00000000..a1c619bf Binary files /dev/null and b/migration-moc-to-nerc/images/S2_Project_Compute_Instance_Details.png differ diff --git a/migration-moc-to-nerc/images/S2_Project_Compute_Instance_Name.png b/migration-moc-to-nerc/images/S2_Project_Compute_Instance_Name.png new file mode 100644 index 00000000..d423b703 Binary files /dev/null and b/migration-moc-to-nerc/images/S2_Project_Compute_Instance_Name.png differ diff --git a/migration-moc-to-nerc/images/S2_Project_Network_SecurityGroup.png b/migration-moc-to-nerc/images/S2_Project_Network_SecurityGroup.png new file mode 100644 index 00000000..8ae5dcf3 Binary files /dev/null and b/migration-moc-to-nerc/images/S2_Project_Network_SecurityGroup.png differ diff --git a/migration-moc-to-nerc/images/S2_Project_Network_SecurityGroup_Details.png b/migration-moc-to-nerc/images/S2_Project_Network_SecurityGroup_Details.png new file mode 100644 index 00000000..cc0de0c2 Binary files /dev/null and b/migration-moc-to-nerc/images/S2_Project_Network_SecurityGroup_Details.png differ diff --git a/migration-moc-to-nerc/images/S2_Project_Network_SecurityGroup_Names.png b/migration-moc-to-nerc/images/S2_Project_Network_SecurityGroup_Names.png new file mode 100644 index 00000000..6223b499 Binary files /dev/null and 
b/migration-moc-to-nerc/images/S2_Project_Network_SecurityGroup_Names.png differ diff --git a/migration-moc-to-nerc/images/S2_Project_Volumes_Details.png b/migration-moc-to-nerc/images/S2_Project_Volumes_Details.png new file mode 100644 index 00000000..c4e0d4f0 Binary files /dev/null and b/migration-moc-to-nerc/images/S2_Project_Volumes_Details.png differ diff --git a/migration-moc-to-nerc/images/S2_Project_Volumes_Names.png b/migration-moc-to-nerc/images/S2_Project_Volumes_Names.png new file mode 100644 index 00000000..1cdd03b2 Binary files /dev/null and b/migration-moc-to-nerc/images/S2_Project_Volumes_Names.png differ diff --git a/migration-moc-to-nerc/images/S2_Project_Volumes_Volumes.png b/migration-moc-to-nerc/images/S2_Project_Volumes_Volumes.png new file mode 100644 index 00000000..ebfe2a69 Binary files /dev/null and b/migration-moc-to-nerc/images/S2_Project_Volumes_Volumes.png differ diff --git a/migration-moc-to-nerc/images/S3_CloudyamlCombined.png b/migration-moc-to-nerc/images/S3_CloudyamlCombined.png new file mode 100644 index 00000000..d5442381 Binary files /dev/null and b/migration-moc-to-nerc/images/S3_CloudyamlCombined.png differ diff --git a/migration-moc-to-nerc/images/S3_CloudyamlMOC.png b/migration-moc-to-nerc/images/S3_CloudyamlMOC.png new file mode 100644 index 00000000..c91cc4e9 Binary files /dev/null and b/migration-moc-to-nerc/images/S3_CloudyamlMOC.png differ diff --git a/migration-moc-to-nerc/images/S3_CloudyamlNERC.png b/migration-moc-to-nerc/images/S3_CloudyamlNERC.png new file mode 100644 index 00000000..5f628075 Binary files /dev/null and b/migration-moc-to-nerc/images/S3_CloudyamlNERC.png differ diff --git a/migration-moc-to-nerc/images/S3_EC2CredMOC.png b/migration-moc-to-nerc/images/S3_EC2CredMOC.png new file mode 100644 index 00000000..acee8179 Binary files /dev/null and b/migration-moc-to-nerc/images/S3_EC2CredMOC.png differ diff --git a/migration-moc-to-nerc/images/S3_EC2CredNERC.png b/migration-moc-to-nerc/images/S3_EC2CredNERC.png new file mode 100644 index 00000000..00a5e254 Binary files /dev/null and b/migration-moc-to-nerc/images/S3_EC2CredNERC.png differ diff --git a/migration-moc-to-nerc/images/S3_ImageSelection.png b/migration-moc-to-nerc/images/S3_ImageSelection.png new file mode 100644 index 00000000..50f79b19 Binary files /dev/null and b/migration-moc-to-nerc/images/S3_ImageSelection.png differ diff --git a/migration-moc-to-nerc/images/S3_InstanceShutdown.png b/migration-moc-to-nerc/images/S3_InstanceShutdown.png new file mode 100644 index 00000000..4f68d428 Binary files /dev/null and b/migration-moc-to-nerc/images/S3_InstanceShutdown.png differ diff --git a/migration-moc-to-nerc/images/S3_MOCEndpoint.png b/migration-moc-to-nerc/images/S3_MOCEndpoint.png new file mode 100644 index 00000000..4974cd8a Binary files /dev/null and b/migration-moc-to-nerc/images/S3_MOCEndpoint.png differ diff --git a/migration-moc-to-nerc/images/S3_NERCEndpoint.png b/migration-moc-to-nerc/images/S3_NERCEndpoint.png new file mode 100644 index 00000000..e2ed009a Binary files /dev/null and b/migration-moc-to-nerc/images/S3_NERCEndpoint.png differ diff --git a/migration-moc-to-nerc/images/S3_ShutOffInstance.png b/migration-moc-to-nerc/images/S3_ShutOffInstance.png new file mode 100644 index 00000000..182f60c2 Binary files /dev/null and b/migration-moc-to-nerc/images/S3_ShutOffInstance.png differ diff --git a/migration-moc-to-nerc/images/S3_VolumeSelect.png b/migration-moc-to-nerc/images/S3_VolumeSelect.png new file mode 100644 index 00000000..2cb77f4d Binary files 
/dev/null and b/migration-moc-to-nerc/images/S3_VolumeSelect.png differ diff --git a/migration-moc-to-nerc/images/S4_VolumeStorageMOC.png b/migration-moc-to-nerc/images/S4_VolumeStorageMOC.png new file mode 100644 index 00000000..92561fe0 Binary files /dev/null and b/migration-moc-to-nerc/images/S4_VolumeStorageMOC.png differ diff --git a/migration-moc-to-nerc/images/S4_VolumeStorageNERC.png b/migration-moc-to-nerc/images/S4_VolumeStorageNERC.png new file mode 100644 index 00000000..a43f36fb Binary files /dev/null and b/migration-moc-to-nerc/images/S4_VolumeStorageNERC.png differ diff --git a/openshift-ai/data-science-project/explore-the-jupyterlab-environment/index.html b/openshift-ai/data-science-project/explore-the-jupyterlab-environment/index.html new file mode 100644 index 00000000..772ae446 --- /dev/null +++ b/openshift-ai/data-science-project/explore-the-jupyterlab-environment/index.html @@ -0,0 +1,4782 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ +
+
+ + + +
+
+ + + + + + + +

Explore the JupyterLab Environment

+

When your workbench is ready, the status will change to Running and you can select +"Open" to go to your environment:

+

Open JupyterLab Environment

+
+

How can I start or stop a Workbench?

+

You can use this "toggle switch" under the "Status" section to easily start/stop +this environment later on.

+
+

Make sure you are selecting "mss-keycloak" once shown:

+

RHOAI JupyterLab Login with KeyCloak

+

Authorize the requested permissions if needed:

+

Authorize Access to the RHOAI

+

This will initiate your JupyterLab +environment based on the Jupyter Image you have selected. JupyterLab offers a +shared interactive integrated development environment.

+

Once you successfully authenticate you should see the NERC RHOAI JupyterLab Web +Interface as shown below:

+

RHOAI JupyterLab Web Interface

+

It's pretty empty right now, though. The first thing we will do is add content +into this environment by using Git.

+

Clone a Git repository

+

You can clone a Git repository in JupyterLab through the left-hand toolbar or +the Git menu option in the main menu as shown below:

+

JupyterLab Toolbar and Menu

+

Let's clone a repository using the left-hand toolbar. Click on the Git icon, as shown below:

+

JupyterLab Git

+

Then click on Clone a Repository as shown below:

+

JupyterLab Git Actions

+

Enter the Git repository URL that points to the end-to-end ML workflows demo project, i.e. https://github.com/nerc-project/nerc_rhoai_mlops.

+

Then click the Clone button as shown below:

+

NERC RHOAI MLOps Example Project

+
+

What is MLOps?

+

Machine learning operations (MLOps) are a set of practices that automate and +simplify machine learning (ML) workflows and deployments.

+
+

Cloning takes a few seconds, after which you can double-click and navigate to the +newly-created folder that contains your cloned Git repository.

+

Exploring the Example NERC MLOps Project

+

You will be able to find the newly-created folder named nerc_rhoai_mlops based +on the Git repository name, as shown below:

+

Git Clone Repo Folder on NERC RHOAI

+

Working with notebooks

+

What's a notebook?

+

A notebook is an environment where you have cells that can display formatted text or code.

+

This is an empty cell:

+

Jupyter Empty Cell

+

And a cell where we have entered some Python code:

+

Jupyter Cell With Python Code

+
    +
  • +

    Code cells contain Python code that can be run interactively. This means you can modify the code and then re-run it, but only for that cell, not for the whole content of the notebook. The code does not run on your computer or in the browser, but directly in the NERC RHOAI environment you are connected to.

    +
  • +
  • +

    To run a code cell, simply select it (click inside the cell, or just to the left of it) and click the Run/Play button in the toolbar (you can also press CTRL+Enter to run a cell, or Shift+Enter to run the cell and automatically select the following one).

    +
  • +
+

The Run button on the toolbar:

+

Jupyter Cell Run Button

+

As you will see, you then get the result of the code that was run in that cell (if the code produces any output), as well as information on when that particular cell was last run.
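For example, a trivial code cell (purely illustrative, not taken from the demo repository) and the output it produces when run might look like this:

```python
# A trivial notebook cell: compute a value and print it.
# The printed output appears directly below the cell after it runs.
message = "Hello from the NERC RHOAI JupyterLab environment"
print(message)
print(f"2 + 2 = {2 + 2}")
```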

+

When you save a notebook, the code as well as all the results are saved. So you can always reopen it to look at the results without having to run the whole program again, while still having access to the code that produced them.

+
+

More about Notebook

+

Notebooks are so named because they are just like a physical notebook. It is exactly as if you were taking notes about your experiments (which you will do), along with the code itself, including any parameters you set. You see the output of the experiment inline (the result from a cell once it is run), along with all the notes you want to take (to do that, you can switch a cell's type from Code to Markdown in the menu).

+
+

Sample Jupyter Notebook files

+

In your Jupyter environment, you can open any Jupyter notebook file by double-clicking it in the file explorer on the left side. This opens another tab in the content section of the environment, on the right.

+

Here, you can find three primary starter notebooks for setting up the intelligent +application: 01_sandbox.ipynb, 02_model_training_basics.ipynb, and 03_remote_inference.ipynb +within the root folder path of nerc_rhoai_mlops.

+

You can open and run 01_sandbox.ipynb to verify that the JupyterLab environment is set up to run Python code properly.

+

You can also find the "samples" folder within the root folder path of nerc_rhoai_mlops. For learning purposes, double-click on it; inside, you'll find some starter Jupyter notebook files: Intro.ipynb, Lorenz.ipynb, and gpu.ipynb. These files can be used to test basic JupyterLab functionality, and you can explore them at your own pace by running each of them individually. Please feel free to experiment, run the different cells, and add some more code. You can do what you want - it is your environment, and there is no risk of breaking anything or impacting other users. This environment isolation is another advantage brought by NERC RHOAI.

+
+

How to get access to the NERC RHOAI Dashboard from JupyterLab Environment?

+

If you had closed the NERC RHOAI dashboard, you can access it from your currently +opened JupyterLab IDE by clicking on File -> Hub Control Panel as shown below:

+

Jupyter Hub Control Panel Menu

+
+

Testing for GPU Code

+

As we set up the workbench specifying the desired Number of GPUs as "1", we can test GPU-based code by running the gpu.ipynb notebook file as shown below:

+

GPU Code Test
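The exact contents of gpu.ipynb may differ, but a minimal GPU check in a TensorFlow-based workbench generally looks like the sketch below (illustrative only, not the notebook's actual code):

```python
import tensorflow as tf

# List the GPUs that TensorFlow can see inside this workbench.
gpus = tf.config.list_physical_devices("GPU")
print(f"Number of GPUs available: {len(gpus)}")
for gpu in gpus:
    print(gpu)

# Run a small matrix multiplication on the first GPU as a smoke test.
if gpus:
    with tf.device("/GPU:0"):
        x = tf.random.uniform((1000, 1000))
        y = tf.matmul(x, x)
    print("Result tensor placed on:", y.device)
```

If no GPU was assigned to the workbench, the list will simply be empty.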

+

Training a model

+

Within the root folder path of nerc_rhoai_mlops, find the sample Jupyter notebook file 02_model_training_basics.ipynb, which demonstrates how to train a model within the NERC RHOAI. To run it, double-click it and select "Run All Cells" from the Run menu to execute all notebook cells at once. It trains a model for "Basic classification of clothing images" by importing the publicly available Fashion MNIST dataset and using TensorFlow. This process takes some time to complete. At the end, it generates and saves the model my-model.keras within the root folder path of nerc_rhoai_mlops.
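The notebook itself contains the authoritative code; the sketch below only illustrates the general shape of such a training script. The model architecture, number of epochs, and other details are assumptions for illustration, apart from the Fashion MNIST dataset and the my-model.keras file name mentioned above:

```python
import tensorflow as tf

# Load the publicly available Fashion MNIST dataset.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0  # scale pixel values to [0, 1]

# A small classifier for the 10 clothing classes (architecture is illustrative).
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation="relu"),
    tf.keras.layers.Dense(10, activation="softmax"),
])
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])

# Train, evaluate, and save the model in the native Keras format.
model.fit(x_train, y_train, epochs=5, validation_split=0.1)
model.evaluate(x_test, y_test, verbose=2)
model.save("my-model.keras")
```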

+
+

The Machine Learning Model File Hosted on NERC OpenStack Object Bucket.

+

The model we are going to use is an object detection model that is able to isolate and recognize T-shirts, bottles, and hats in pictures. Although the process is broadly the same as what we saw in the previous section, this model has already been trained, as training it takes a few hours even with the help of a GPU. If you want to know more about this training process, you can have a look here.

+

The resulting model has been saved in the ONNX format, an open standard for machine learning interoperability, which we can use with OpenVINO and RHOAI model serving. The model has been stored and is available for download in a NERC OpenStack Object Storage container, as described here.
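If you want to export your own Keras model (such as the one trained above) to ONNX, one common route is the tf2onnx package. This is an assumed, illustrative approach, not necessarily how the coolstore model was produced:

```python
import tensorflow as tf
import tf2onnx  # assumed to be installed, e.g. via `pip install tf2onnx`

# Reload the Keras model trained earlier in this section.
model = tf.keras.models.load_model("my-model.keras")

# Describe the expected input: a batch of 28x28 grayscale images.
input_signature = [tf.TensorSpec([None, 28, 28], tf.float32, name="input")]

# Convert to ONNX and write the result next to the Keras file.
onnx_model, _ = tf2onnx.convert.from_keras(
    model, input_signature=input_signature, output_path="my-model.onnx"
)
print("ONNX model written to my-model.onnx")
```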

+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift-ai/data-science-project/images/QR-code.png b/openshift-ai/data-science-project/images/QR-code.png new file mode 100644 index 00000000..25010f78 Binary files /dev/null and b/openshift-ai/data-science-project/images/QR-code.png differ diff --git a/openshift-ai/data-science-project/images/add-a-model-server.png b/openshift-ai/data-science-project/images/add-a-model-server.png new file mode 100644 index 00000000..b7d5f2fc Binary files /dev/null and b/openshift-ai/data-science-project/images/add-a-model-server.png differ diff --git a/openshift-ai/data-science-project/images/add-data-connection.png b/openshift-ai/data-science-project/images/add-data-connection.png new file mode 100644 index 00000000..751159ba Binary files /dev/null and b/openshift-ai/data-science-project/images/add-data-connection.png differ diff --git a/openshift-ai/data-science-project/images/authorize-access-to-the-rhoai.png b/openshift-ai/data-science-project/images/authorize-access-to-the-rhoai.png new file mode 100644 index 00000000..97626616 Binary files /dev/null and b/openshift-ai/data-science-project/images/authorize-access-to-the-rhoai.png differ diff --git a/openshift-ai/data-science-project/images/capture-camera-image.png b/openshift-ai/data-science-project/images/capture-camera-image.png new file mode 100644 index 00000000..e491938c Binary files /dev/null and b/openshift-ai/data-science-project/images/capture-camera-image.png differ diff --git a/openshift-ai/data-science-project/images/change-grpc-url-value.png b/openshift-ai/data-science-project/images/change-grpc-url-value.png new file mode 100644 index 00000000..85b80450 Binary files /dev/null and b/openshift-ai/data-science-project/images/change-grpc-url-value.png differ diff --git a/openshift-ai/data-science-project/images/configure-a-new-data-connection.png b/openshift-ai/data-science-project/images/configure-a-new-data-connection.png new file mode 100644 index 00000000..cb949ab4 Binary files /dev/null and b/openshift-ai/data-science-project/images/configure-a-new-data-connection.png differ diff --git a/openshift-ai/data-science-project/images/configure-a-new-model-server.png b/openshift-ai/data-science-project/images/configure-a-new-model-server.png new file mode 100644 index 00000000..047875d3 Binary files /dev/null and b/openshift-ai/data-science-project/images/configure-a-new-model-server.png differ diff --git a/openshift-ai/data-science-project/images/configure-and-deploy-model.png b/openshift-ai/data-science-project/images/configure-and-deploy-model.png new file mode 100644 index 00000000..221d5882 Binary files /dev/null and b/openshift-ai/data-science-project/images/configure-and-deploy-model.png differ diff --git a/openshift-ai/data-science-project/images/create-workbench.png b/openshift-ai/data-science-project/images/create-workbench.png new file mode 100644 index 00000000..5d0479e4 Binary files /dev/null and b/openshift-ai/data-science-project/images/create-workbench.png differ diff --git a/openshift-ai/data-science-project/images/data-connection-info.png b/openshift-ai/data-science-project/images/data-connection-info.png new file mode 100644 index 00000000..de013e79 Binary files /dev/null and b/openshift-ai/data-science-project/images/data-connection-info.png differ diff --git a/openshift-ai/data-science-project/images/data-science-project-details.png b/openshift-ai/data-science-project/images/data-science-project-details.png new file mode 100644 index 00000000..4eb76f4c 
Binary files /dev/null and b/openshift-ai/data-science-project/images/data-science-project-details.png differ diff --git a/openshift-ai/data-science-project/images/data-science-projects.png b/openshift-ai/data-science-project/images/data-science-projects.png new file mode 100644 index 00000000..1c8379d8 Binary files /dev/null and b/openshift-ai/data-science-project/images/data-science-projects.png differ diff --git a/openshift-ai/data-science-project/images/deployed-model-inference-endpoints.png b/openshift-ai/data-science-project/images/deployed-model-inference-endpoints.png new file mode 100644 index 00000000..851c0365 Binary files /dev/null and b/openshift-ai/data-science-project/images/deployed-model-inference-endpoints.png differ diff --git a/openshift-ai/data-science-project/images/gpu-code-test.png b/openshift-ai/data-science-project/images/gpu-code-test.png new file mode 100644 index 00000000..ddc1adf9 Binary files /dev/null and b/openshift-ai/data-science-project/images/gpu-code-test.png differ diff --git a/openshift-ai/data-science-project/images/intelligent-application-frontend-interface.png b/openshift-ai/data-science-project/images/intelligent-application-frontend-interface.png new file mode 100644 index 00000000..58624c8f Binary files /dev/null and b/openshift-ai/data-science-project/images/intelligent-application-frontend-interface.png differ diff --git a/openshift-ai/data-science-project/images/intelligent_application-topology.png b/openshift-ai/data-science-project/images/intelligent_application-topology.png new file mode 100644 index 00000000..c8410459 Binary files /dev/null and b/openshift-ai/data-science-project/images/intelligent_application-topology.png differ diff --git a/openshift-ai/data-science-project/images/intelligent_application_deployment-yaml-content.png b/openshift-ai/data-science-project/images/intelligent_application_deployment-yaml-content.png new file mode 100644 index 00000000..a510d633 Binary files /dev/null and b/openshift-ai/data-science-project/images/intelligent_application_deployment-yaml-content.png differ diff --git a/openshift-ai/data-science-project/images/jupyter-cell-with-code.png b/openshift-ai/data-science-project/images/jupyter-cell-with-code.png new file mode 100644 index 00000000..d0f61b78 Binary files /dev/null and b/openshift-ai/data-science-project/images/jupyter-cell-with-code.png differ diff --git a/openshift-ai/data-science-project/images/jupyter-empty-cell.png b/openshift-ai/data-science-project/images/jupyter-empty-cell.png new file mode 100644 index 00000000..2109bd62 Binary files /dev/null and b/openshift-ai/data-science-project/images/jupyter-empty-cell.png differ diff --git a/openshift-ai/data-science-project/images/jupyter-run-code-button.png b/openshift-ai/data-science-project/images/jupyter-run-code-button.png new file mode 100644 index 00000000..86710b43 Binary files /dev/null and b/openshift-ai/data-science-project/images/jupyter-run-code-button.png differ diff --git a/openshift-ai/data-science-project/images/jupyterlab-toolbar-main-menu.jpg b/openshift-ai/data-science-project/images/jupyterlab-toolbar-main-menu.jpg new file mode 100644 index 00000000..f09dce1c Binary files /dev/null and b/openshift-ai/data-science-project/images/jupyterlab-toolbar-main-menu.jpg differ diff --git a/openshift-ai/data-science-project/images/jupyterlab_git.png b/openshift-ai/data-science-project/images/jupyterlab_git.png new file mode 100644 index 00000000..e1874ba0 Binary files /dev/null and 
b/openshift-ai/data-science-project/images/jupyterlab_git.png differ diff --git a/openshift-ai/data-science-project/images/jupyterlab_git_actions.png b/openshift-ai/data-science-project/images/jupyterlab_git_actions.png new file mode 100644 index 00000000..dc958348 Binary files /dev/null and b/openshift-ai/data-science-project/images/jupyterlab_git_actions.png differ diff --git a/openshift-ai/data-science-project/images/jupyterlab_web_interface.png b/openshift-ai/data-science-project/images/jupyterlab_web_interface.png new file mode 100644 index 00000000..1c5f0a87 Binary files /dev/null and b/openshift-ai/data-science-project/images/jupyterlab_web_interface.png differ diff --git a/openshift-ai/data-science-project/images/juyter-hub-control-panel-menu.png b/openshift-ai/data-science-project/images/juyter-hub-control-panel-menu.png new file mode 100644 index 00000000..d59b9496 Binary files /dev/null and b/openshift-ai/data-science-project/images/juyter-hub-control-panel-menu.png differ diff --git a/openshift-ai/data-science-project/images/model-deployed-successful.png b/openshift-ai/data-science-project/images/model-deployed-successful.png new file mode 100644 index 00000000..97674d80 Binary files /dev/null and b/openshift-ai/data-science-project/images/model-deployed-successful.png differ diff --git a/openshift-ai/data-science-project/images/model-serving-deploy-model-option.png b/openshift-ai/data-science-project/images/model-serving-deploy-model-option.png new file mode 100644 index 00000000..0a6ffea0 Binary files /dev/null and b/openshift-ai/data-science-project/images/model-serving-deploy-model-option.png differ diff --git a/openshift-ai/data-science-project/images/model-test-object-detection.png b/openshift-ai/data-science-project/images/model-test-object-detection.png new file mode 100644 index 00000000..f61e2e3c Binary files /dev/null and b/openshift-ai/data-science-project/images/model-test-object-detection.png differ diff --git a/openshift-ai/data-science-project/images/nerc-mlops-git-repo.png b/openshift-ai/data-science-project/images/nerc-mlops-git-repo.png new file mode 100644 index 00000000..5bf97ea5 Binary files /dev/null and b/openshift-ai/data-science-project/images/nerc-mlops-git-repo.png differ diff --git a/openshift-ai/data-science-project/images/object-detection-via-phone.jpg b/openshift-ai/data-science-project/images/object-detection-via-phone.jpg new file mode 100644 index 00000000..e23673b0 Binary files /dev/null and b/openshift-ai/data-science-project/images/object-detection-via-phone.jpg differ diff --git a/openshift-ai/data-science-project/images/open-tensorflow-jupyter-lab.png b/openshift-ai/data-science-project/images/open-tensorflow-jupyter-lab.png new file mode 100644 index 00000000..6b68127c Binary files /dev/null and b/openshift-ai/data-science-project/images/open-tensorflow-jupyter-lab.png differ diff --git a/openshift-ai/data-science-project/images/openstack-bucket-storing-model-file.png b/openshift-ai/data-science-project/images/openstack-bucket-storing-model-file.png new file mode 100644 index 00000000..8f5f373b Binary files /dev/null and b/openshift-ai/data-science-project/images/openstack-bucket-storing-model-file.png differ diff --git a/openshift-ai/data-science-project/images/pre_post_processor_deployment-yaml-content.png b/openshift-ai/data-science-project/images/pre_post_processor_deployment-yaml-content.png new file mode 100644 index 00000000..a0afa8c6 Binary files /dev/null and 
b/openshift-ai/data-science-project/images/pre_post_processor_deployment-yaml-content.png differ diff --git a/openshift-ai/data-science-project/images/project-verify-yaml-editor.png b/openshift-ai/data-science-project/images/project-verify-yaml-editor.png new file mode 100644 index 00000000..82e60e6c Binary files /dev/null and b/openshift-ai/data-science-project/images/project-verify-yaml-editor.png differ diff --git a/openshift-ai/data-science-project/images/rhoai-git-cloned-repo.png b/openshift-ai/data-science-project/images/rhoai-git-cloned-repo.png new file mode 100644 index 00000000..ff812e2c Binary files /dev/null and b/openshift-ai/data-science-project/images/rhoai-git-cloned-repo.png differ diff --git a/openshift-ai/data-science-project/images/rhoai-jupyterlab-login.png b/openshift-ai/data-science-project/images/rhoai-jupyterlab-login.png new file mode 100644 index 00000000..47393117 Binary files /dev/null and b/openshift-ai/data-science-project/images/rhoai-jupyterlab-login.png differ diff --git a/openshift-ai/data-science-project/images/running-model-server.png b/openshift-ai/data-science-project/images/running-model-server.png new file mode 100644 index 00000000..2f2134c9 Binary files /dev/null and b/openshift-ai/data-science-project/images/running-model-server.png differ diff --git a/openshift-ai/data-science-project/images/switch-camera-view.png b/openshift-ai/data-science-project/images/switch-camera-view.png new file mode 100644 index 00000000..12b02268 Binary files /dev/null and b/openshift-ai/data-science-project/images/switch-camera-view.png differ diff --git a/openshift-ai/data-science-project/images/tensor-flow-workbench.png b/openshift-ai/data-science-project/images/tensor-flow-workbench.png new file mode 100644 index 00000000..122022ba Binary files /dev/null and b/openshift-ai/data-science-project/images/tensor-flow-workbench.png differ diff --git a/openshift-ai/data-science-project/images/workbench-cluster-storage.png b/openshift-ai/data-science-project/images/workbench-cluster-storage.png new file mode 100644 index 00000000..13399242 Binary files /dev/null and b/openshift-ai/data-science-project/images/workbench-cluster-storage.png differ diff --git a/openshift-ai/data-science-project/images/workbench-error-status.png b/openshift-ai/data-science-project/images/workbench-error-status.png new file mode 100644 index 00000000..59e04c66 Binary files /dev/null and b/openshift-ai/data-science-project/images/workbench-error-status.png differ diff --git a/openshift-ai/data-science-project/images/workbench-information.png b/openshift-ai/data-science-project/images/workbench-information.png new file mode 100644 index 00000000..578ff78f Binary files /dev/null and b/openshift-ai/data-science-project/images/workbench-information.png differ diff --git a/openshift-ai/data-science-project/images/yaml-import-new-content.png b/openshift-ai/data-science-project/images/yaml-import-new-content.png new file mode 100644 index 00000000..f3e4a58e Binary files /dev/null and b/openshift-ai/data-science-project/images/yaml-import-new-content.png differ diff --git a/openshift-ai/data-science-project/images/yaml-upload-plus-icon.png b/openshift-ai/data-science-project/images/yaml-upload-plus-icon.png new file mode 100644 index 00000000..498602ed Binary files /dev/null and b/openshift-ai/data-science-project/images/yaml-upload-plus-icon.png differ diff --git a/openshift-ai/data-science-project/model-serving-in-the-rhoai/index.html 
b/openshift-ai/data-science-project/model-serving-in-the-rhoai/index.html new file mode 100644 index 00000000..cbbeec96 --- /dev/null +++ b/openshift-ai/data-science-project/model-serving-in-the-rhoai/index.html @@ -0,0 +1,4737 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Model Serving in the NERC RHOAI

+

Prerequisites:

+

To run a model server and deploy a model on it, you need to have:

+ +

Create a data connection

+

Once we have our workbench and cluster storage set up, we can add data connections. +Click the "Add data connection" button to open the data connection configuration +window as shown below:

+

Add Data Connection

+

Data connections are configurations for remote data locations. Within this window, enter the information about the S3-compatible object storage bucket where the model is stored. Enter the following information:

+
    +
  • +

    Name: The name you want to give to the data connection.

    +
  • +
  • +

    Access Key: The access key to the bucket.

    +
  • +
  • +

    Secret Key: The secret for the access key.

    +
  • +
  • +

    Endpoint: The endpoint to connect to the storage.

    +
  • +
  • +

    Region: The region to connect to the storage.

    +
  • +
  • +

    Bucket: The name of the bucket.

    +
  • +
+

NOTE: You are not required to use the S3 service from Amazon Web Services (AWS). Any S3-compatible storage, e.g. NERC OpenStack Container (Ceph), Minio, AWS S3, etc., is supported.

+

Configure and Add A New Data Connection

+

For our example project, let's name it "ocp-nerc-container-connect", select "us-east-1" as the Region, and choose "ocp-container" as the Bucket.

+

The API Access EC2 credentials can be downloaded and accessed from the NERC OpenStack +Project as described here. +This credential file contains information regarding Access Key, +Secret Key, and Endpoint.

+

Very Important Note: If you are using an AWS S3 bucket, the Endpoint +needs to be set as https://s3.amazonaws.com/. However, for the NERC Object Storage +container, which is based on the Ceph backend, the Endpoint needs to be set +as https://stack.nerc.mghpcc.org:13808, and the Region should be set as us-east-1.

+
+

How to store & connect to the model file in the object storage bucket?

+

The model file(s) should have been saved into an S3-compatible object storage +bucket (NERC OpenStack Container [Ceph], Minio, or AWS S3) for which you must +have the connection information, such as location and credentials. You can +create a bucket on your active project at the NERC OpenStack Project by following +the instructions in this guide.

+

The API Access EC2 credentials can be downloaded and accessed from the NERC +OpenStack Project as described here.

+

For our example project, we are creating a bucket named "ocp-container" in +one of our NERC OpenStack project's object storage. Inside this bucket, we +have added a folder or directory called "coolstore-model", where we will +store the model file in ONNX format, as shown here:

+

NERC OpenStack Container Storing Model File

+

ONNX: An open standard for machine learning interoperability.
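For reference, the model file can be copied into such a bucket from any S3-compatible client. The sketch below uses boto3 with the NERC Object Storage endpoint and region mentioned in the note above; the local file name model.onnx and the credential placeholders are assumptions you would replace with your own values:

```python
import boto3

# EC2-style credentials downloaded from your NERC OpenStack project
# (placeholders -- substitute your own access and secret keys).
session = boto3.session.Session(
    aws_access_key_id="YOUR_ACCESS_KEY",
    aws_secret_access_key="YOUR_SECRET_KEY",
)

# NERC Object Storage (Ceph) endpoint and region, as noted above.
s3 = session.client(
    "s3",
    endpoint_url="https://stack.nerc.mghpcc.org:13808",
    region_name="us-east-1",
)

# Upload the ONNX model into the "coolstore-model" folder of the bucket.
s3.upload_file("model.onnx", "ocp-container", "coolstore-model/model.onnx")
print("Uploaded model.onnx to ocp-container/coolstore-model/")
```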

+
+

After completing the required fields, click Add data connection. You should +now see the data connection displayed in the main project window as shown below:

+

New Data Connection Info

+

Create a model server

+

After creating the data connection, you can add your model server. Select +Add server as shown below:

+

Add A Model Server

+

In the pop-up window that appears, depicted as shown below, you can specify the +following details:

+

Configure A New Model Server

+
    +
  • +

    Model server name

    +
  • +
  • +

    Serving runtime: either "OpenVINO Model Server" or "OpenVINO Model Server + (Supports GPUs)"

    +
  • +
  • +

    Number of model server replicas: This is the number of instances of the + model server engine that you want to deploy. You can scale it up as needed, + depending on the number of requests you will receive.

    +
  • +
  • +

    Model server size: This is the amount of resources, CPU, and RAM that will + be allocated to your server. Select the appropriate configuration for size and + the complexity of your model.

    +
  • +
  • +

    Model route: Check this box if you want the serving endpoint (the model serving + API) to be accessible outside of the OpenShift cluster through an external route.

    +
  • +
  • +

    Token authorization: Check this box if you want to secure or restrict access + to the model by forcing requests to provide an authorization token.

    +
  • +
+

After adding and selecting options within the Add model server pop-up +window, click Add to create the model server.

+

For our example project, let's name the model server "coolstore-modelserver". We'll select OpenVINO Model Server as the Serving runtime. Leave the number of replicas at "1" and the size at "Small". At this point, don't check Make model available via an external route, as shown below:

+

Running Model Server

+
+

NERC RHOAI supported Model Server Runtimes

+

NERC RHOAI integrates Intel's OpenVINO Model Server runtime, a high-performance system for serving models, optimized for deployment on Intel architectures. NERC RHOAI also offers an OpenVINO Model Server serving runtime that supports GPUs.

+
+

Once you've configured your model server, you can deploy your model by clicking +on "Deploy model" located on the right side of the running model server. Alternatively, +you can also do this from the main RHOAI dashboard's "Model Serving" menu item as +shown below:

+

Model Serving Deploy Model Option

+

If you wish to view details for the model server, click on the link corresponding to the model server's name. You can also modify a model server configuration by clicking on the three dots on the right side and selecting Edit model server. This brings back the same configuration page we used earlier. This menu also has an option to delete the model server.

+

Deploy the model

+

To add a model to be served, click the Deploy model button. Doing so will +initiate the Deploy model pop-up window as shown below:

+

Configure and Deploy Model Info

+

Enter the following information for your new model:

+
    +
  • +

    Model Name: The name you want to give to your model (e.g., "coolstore").

    +
  • +
  • +

    Model framework (name-version): The framework used to save this model. + At this time, OpenVINO IR or ONNX or Tensorflow are supported.

    +
  • +
  • +

    Model location: Select the data connection that you created to store the + model. Alternatively, you can create another data connection directly from this + menu.

    +
  • +
  • +

    Folder path: If your model is not located at the root of the bucket of your + data connection, you must enter the path to the folder it is in.

    +
  • +
+

For our example project, let's name the model "coolstore", select "onnx-1" for the framework, select the data connection you created before as the Model location, and enter "coolstore-model" as the folder path for the model (without a leading /).

+

When you are ready to deploy your model, select the Deploy button.

+

When you return to the Deployed models page, you will see your newly deployed model. +You should click on the 1 on the Deployed models tab to see details. When the +model has finished deploying, the status icon will be a green checkmark indicating +the model deployment is complete as shown below:

+

Model Deployed Successfully

+

The model is now accessible through the API endpoint of the model server. The endpoint information differs depending on how you configured the model server.

+

If you did not expose the model externally through a route, click on the Internal +Service link in the Inference endpoint section. A popup will display the address +for the gRPC and the REST URLs for the inference endpoints as shown below:

+

Successfully Deployed Model Inference endpoints Info

+

Notes:

+
    +
  • +

    The REST URL displayed is only the base address of the endpoint. You must + append /v2/models/name-of-your-model/infer to it to have the full address. + Example: http://modelmesh-serving.model-serving:8008/v2/models/coolstore/infer

    +
  • +
  • +

    The full documentation of the API (REST and gRPC) is available here.

    +
  • +
  • +

    The gRPC proto file for the Model Server is available here.

    +
  • +
  • +

    If you have exposed the model through an external route, the Inference endpoint + displays the full URL that you can copy.

    +
  • +
+
+

Important Note

+

Even when you expose the model through an external route, the internal ones +are still available. They use this format:

+
    +
  • +

    REST: http://modelmesh-serving.name-of-your-project:8008/v2/models/name-of-your-model/infer

    +
  • +
  • +

    gRPC: grpc://modelmesh-serving.name-of-your-project:8033. Please make a note of the gRPC URL value; we will need it later.

    +
  • +
+
+

Your model is now deployed and ready to use!
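For reference, a call to the internal REST inference endpoint described in the notes above follows the Open Inference (KServe v2) protocol. The request shape below is a hedged sketch: the input tensor name, shape, and data are placeholders that depend on your model, so consult the API documentation linked above for the exact contract:

```python
import requests

# Internal REST endpoint with the required /v2/models/<name>/infer suffix.
url = "http://modelmesh-serving.model-serving:8008/v2/models/coolstore/infer"

# Open Inference Protocol (v2) request body. The tensor name, shape, and
# datatype below are placeholders -- adjust them to match your model.
payload = {
    "inputs": [
        {
            "name": "images",               # assumed input tensor name
            "shape": [1, 3, 416, 416],      # assumed input shape
            "datatype": "FP32",
            "data": [0.0] * (3 * 416 * 416),  # flattened image data
        }
    ]
}

response = requests.post(url, json=payload, timeout=30)
response.raise_for_status()
print(response.json())  # the response contains an "outputs" list
```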

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift-ai/data-science-project/testing-model-in-the-rhoai/index.html b/openshift-ai/data-science-project/testing-model-in-the-rhoai/index.html new file mode 100644 index 00000000..b0c39807 --- /dev/null +++ b/openshift-ai/data-science-project/testing-model-in-the-rhoai/index.html @@ -0,0 +1,4747 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Test the Model in the NERC RHOAI

+

Now that the model server is ready to receive requests, +we can test it.

+
+

How to get access to the NERC RHOAI Dashboard from JupyterLab Environment?

+

If you had closed the NERC RHOAI dashboard, you can access it from your currently +opened JupyterLab IDE by clicking on File -> Hub Control Panel as shown below:

+

Jupyter Hub Control Panel Menu

+
+
    +
  • +

    In your project in JupyterLab, open the notebook 03_remote_inference.ipynb + and follow the instructions to see how the model can be queried.

    +
  • +
  • +

    Update the grpc_url with the gRPC URL value you noted earlier from the model deployed on the NERC RHOAI model server.

    +

    Change grpc URL Value

    +
  • +
  • +

    Once you've completed the notebook's instructions, the object detection model + can isolate and recognize T-shirts, bottles, and hats in pictures, as shown below:

    +

    Model Test to Detect Objects In An Image

    +
  • +
+

Building and deploying an intelligent application

+

The application we are going to deploy is a simple example of how you can add an intelligent feature powered by AI/ML to an application. It is a web app that you can use on your phone to discover coupons for various items you see in a store, in an augmented-reality way.

+

Architecture

+

The different components of this intelligent application are:

+

The Frontend: a React application, typically running on the browser of your +phone,

+

The Backend: a NodeJS server, serving the application and relaying API calls,

+

The Pre-Post Processing Service: a Python FastAPI service, doing the image +pre-processing, calling the model server API, and doing the post-processing before +sending the results back.

+

The Model Server: the RHOAI component serving the model as an API to do +the inference.

+

Application Workflow Steps

+
    +
  1. +

    Pass the image to the pre-post processing service

    +
  2. +
  3. +

    Pre-process the image and call the model server

    +
  4. +
  5. +

    Send back the inference result

    +
  6. +
  7. +

    Post-process the inference and send back the result

    +
  8. +
  9. +

    Pass the result to the frontend for display

    +
  10. +
+

Deploy the application

+

Deploying the application is straightforward, as the necessary YAML files have already been created for you. They are included in the Git project we used for this example project. You can find them in the deployment folder inside your JupyterLab environment, or directly here.

+

To deploy the Pre-Post Processing Service service and the Application:

+
    +
  • +

    From your NERC's OpenShift Web Console, + navigate to your project corresponding to the NERC RHOAI Data Science Project + and select the "Import YAML" button, represented by the "+" icon in the top + navigation bar as shown below:

    +

    YAML Add Icon

    +
  • +
  • +

    Verify that you selected the correct project.

    +

    Correct Project Selected for YAML Editor

    +
  • +
  • +

    Copy/Paste the content of the file pre_post_processor_deployment.yaml inside + the opened YAML editor. If you have named your model coolstore as instructed, + you're good to go. If not, modify the value on line # 35 + with the name you set. You can then click the Create button as shown below:

    +

    YAML Editor Add Pre-Post Processing Service Content

    +
  • +
  • +

    Once the resource is successfully created, you will see the following screen:

    +

    Resources successfully created Importing More YAML

    +
  • +
  • +

    Click on "Import more YAML" and Copy/Paste the content of the file intelligent_application_deployment.yaml + inside the opened YAML editor. Nothing to change here, you can then click the + Create button as shown below:

    +

    YAML Editor Pre-Post Processing Service Content

    +
  • +
  • +

    If both deployments are successful, you will be able to see both of them grouped + under "intelligent-application" on the Topology View menu, as shown below:

    +

    Intelligent Application Under Topology

    +
  • +
+

Use the application

+

The application is relatively straightforward to use. Click on the URL for the +Route ia-frontend that was created.

+

You first have to allow it to use your camera; this is the interface you get:

+

Intelligent Application Frontend Interface

+

You have:

+
    +
  • +

    The current view of your camera.

    +
  • +
  • +

    A button to take a picture as shown here:

    +

    Capture Camera Image

    +
  • +
  • +

    A button to switch from front to rear camera if you are using a phone:

    +

    Switch Camera View

    +
  • +
  • +

    A QR code that you can use to quickly open the application on a phone + (much easier than typing the URL!):

    +

    QR code

    +
  • +
+

When you take a picture, it will be sent to the inference service, and you will see which items have been detected and whether a promotion is available, as shown below:

+

Object Detection Via Phone Camera

+

Tweak the application

+

There are two parameters you can change on this application:

+
    +
  • +

    On the ia-frontend Deployment, you can modify the DISPLAY_BOX environment + variable from true to false. It will hide the bounding box and the inference + score, so that you get only the coupon flying over the item.

    +
  • +
  • +

    On the ia-inference Deployment, the one used for pre-post processing, you + can modify the COUPON_VALUE environment variable. The format is simply an + Array with the value of the coupon for the 3 classes: bottle, hat, shirt. As + you see, these values could be adjusted in real time, and this could even be + based on another ML model!

    +
  • +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift-ai/data-science-project/using-projects-the-rhoai/index.html b/openshift-ai/data-science-project/using-projects-the-rhoai/index.html new file mode 100644 index 00000000..a7cb81cf --- /dev/null +++ b/openshift-ai/data-science-project/using-projects-the-rhoai/index.html @@ -0,0 +1,4646 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Using Your Data Science Project (DSP)

+

You can access your current projects by navigating to the "Data Science Projects" +menu item on the left-hand side, as highlighted in the figure below:

+

Data Science Projects

+

If you have any existing projects, they will be displayed here. These projects +correspond to your NERC-OCP (OpenShift) resource allocations.

+
+

Why we need Data Science Project (DSP)?

+

To implement a data science workflow, you must use a data science project. Projects allow you and your team to organize and collaborate on resources within separated namespaces. From a project you can create multiple workbenches, each with its own Jupyter notebook environment and its own data connections and cluster storage. In addition, the workbenches can share models and data with pipelines and model servers.

+
+

Selecting your data science project

+

Here, you can click on specific projects corresponding to the appropriate allocation +where you want to work. This brings you to your selected data science project's +details page, as shown below:

+

Data Science Project's Details

+

Within the data science project, you can add the following configuration options:

+
    +
  • +

    Workbenches: Development environments within your project where you can access + notebooks and generate models.

    +
  • +
  • +

    Cluster storage: Storage for your project in your OpenShift cluster.

    +
  • +
  • +

    Data connections: A list of data sources that your project uses.

    +
  • +
  • +

    Pipelines: A list of created and configured pipeline servers.

    +
  • +
  • +

    Models and model servers: A list of models and model servers that your project + uses.

    +
  • +
+

As you can see in the project's details figure, our selected data science project +currently has no workbenches, storage, data connections, pipelines, or model servers.

+

Populate the data science project with a Workbench

+

Add a workbench by clicking the Create workbench button as shown below:

+

Create Workbench

+
+

What are Workbenches?

+

Workbenches are development environments. They can be based on JupyterLab, but +also on other types of IDEs, like VS Code or RStudio. You can create as many +workbenches as you want, and they can run concurrently.

+
+

On the Create workbench page, complete the following information.

+

Note: Not all fields are required.

+
    +
  • +

    Name

    +
  • +
  • +

    Description

    +
  • +
  • +

    Notebook image (Image selection)

    +
  • +
  • +

    Deployment size (Container size and Number of GPUs)

    +
  • +
  • +

    Environment variables

    +
  • +
  • +

    Cluster storage name

    +
  • +
  • +

    Cluster storage description

    +
  • +
  • +

    Persistent storage size

    +
  • +
  • +

    Data connections

    +
  • +
+
+

How to specify CPUs, Memory, and GPUs for your JupyterLab workbench?

+

You have the option to select different container sizes to define compute +resources, including CPUs and memory. Each container size comes with pre-configured +CPU and memory resources.

+

Optionally, you can specify the desired Number of GPUs depending on the +nature of your data analysis and machine learning code requirements. However, +this number should not exceed the GPU quota specified by the value of the +"OpenShift Request on GPU Quota" attribute that has been approved for +this "NERC-OCP (OpenShift)" resource allocation on NERC's ColdFront, as +described here.

+

If you need to increase this quota value, you can request a change as +explained here.

+
+

Once you have entered the information for your workbench, click Create.

+

Fill Workbench Information

+

For our example project, let's name it "Tensorflow Workbench". We'll select the TensorFlow image, choose a Deployment size of Small, set the Number of GPUs to 1, and allocate a Cluster storage space of 1GB.

+
+

More About Cluster Storage

+

Cluster storage consists of Persistent Volume Claims (PVCs), which are +persistent storage spaces available for storing your notebooks and data. You +can create PVCs directly from here and mount them in your workbenches as +needed. It's worth noting that a default cluster storage (PVC) is automatically +created with the same name as your workbench to save your work.

+
+

After creating the workbench, you will return to your project page. It shows the +status of the workbench as shown below:

+

Workbench and Cluster Storage

+

Notice that under the status indicator the workbench is Running. However, if any +issues arise, such as an "exceeded quota" error, a red exclamation mark will appear +under the Status indicator, as shown in the example below:

+

Workbench Error Status

+

You can hover over that icon to view details. Upon closer inspection of the error +message and the "Event log", you will receive details about the issue, enabling +you to resolve it accordingly.

+

When your workbench is ready and the status changes to Running, you can select +"Open" to access your environment:

+

Open JupyterLab Environment

+
+

How can I start or stop a Workbench?

+

You can use this "toggle switch" under the "Status" section to easily start/stop +this environment later on.

+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift-ai/get-started/rhoai-overview/index.html b/openshift-ai/get-started/rhoai-overview/index.html new file mode 100644 index 00000000..d0b87113 --- /dev/null +++ b/openshift-ai/get-started/rhoai-overview/index.html @@ -0,0 +1,4589 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Red Hat OpenShift AI (RHOAI) Overview

+

RHOAI offers a versatile and scalable MLOps solution equipped with tools for rapidly constructing, deploying, and overseeing AI-driven applications. Integrating the proven features of Red Hat OpenShift AI and Red Hat OpenShift creates a comprehensive, enterprise-grade artificial intelligence and machine learning (AI/ML) application platform that facilitates collaboration among data scientists, engineers, and app developers. This consolidated platform promotes consistency, security, and scalability, fostering seamless teamwork across disciplines and empowering teams to quickly explore, build, train, deploy, and test machine learning models, and to scale AI-enabled intelligent applications.

+

Formerly known as Red Hat OpenShift Data Science, OpenShift AI facilitates the +complete journey of AI/ML experiments and models. OpenShift AI enables data +acquisition and preparation, model training and fine-tuning, model serving and +model monitoring, hardware acceleration, and distributed workloads using +graphics processing unit (GPU) resources.

+

AI for All

+

Recent enhancements to Red Hat OpenShift AI include:

+
    +
  • +

    Deployment pipelines for monitoring AI/ML experiments and automating ML workflows accelerate the iteration process for data scientists and developers of intelligent applications. This integration facilitates swift iteration on machine learning projects and embeds automation into application deployment and updates.

    +
  • +
  • +

    Model serving now incorporates GPU assistance for inference tasks and custom + model serving runtimes, enhancing inference performance and streamlining the + deployment of foundational models.

    +
  • +
  • +

    With Model monitoring, organizations can oversee performance and operational + metrics through a centralized dashboard, enhancing management capabilities.

    +
  • +
+

Red Hat OpenShift AI ecosystem

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Name: Description
AI/ML modeling and visualization tools: JupyterLab UI with prebuilt notebook images and common Python libraries and packages (TensorFlow, PyTorch, CUDA), plus support for custom notebook images
Data engineering: Support for different data engineering third-party tools (optional)
Data ingestion and storage: Supports Amazon Simple Storage Service (S3) and NERC OpenStack Object Storage
GPU support: Available NVIDIA GPU devices (with GPU operator): NVIDIA A100-SXM4-40GB and V100-PCIE-32GB
Model serving and monitoring: Model serving (KServe with user interface), model monitoring, OpenShift Source-to-Image (S2I), Red Hat OpenShift API Management (optional add-on), Intel Distribution of the OpenVINO toolkit
Data science pipelines: Data science pipelines (Kubeflow Pipelines) chain together processes like data preparation, model building, and model serving
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift-ai/index.html b/openshift-ai/index.html new file mode 100644 index 00000000..fe452cfc --- /dev/null +++ b/openshift-ai/index.html @@ -0,0 +1,4597 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Red Hat OpenShift AI (RHOAI) Tutorial Index

+

If you're just starting out, we recommend starting from Red Hat OpenShift AI +(RHOAI) Overview and going through the tutorial +in order.

+

If you just need to review a specific step, you can find the page you need in +the list below.

+

NERC OpenShift AI Getting Started

+ +

NERC OpenShift AI dashboard

+ +

Using Data Science Project in the NERC RHOAI

+ +

Other Example Projects

+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift-ai/logging-in/access-the-rhoai-dashboard/index.html b/openshift-ai/logging-in/access-the-rhoai-dashboard/index.html new file mode 100644 index 00000000..79577efa --- /dev/null +++ b/openshift-ai/logging-in/access-the-rhoai-dashboard/index.html @@ -0,0 +1,4469 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Access the NERC's OpenShift AI dashboard

+

Access the NERC's OpenShift Web Console +via the web browser as described here.

+

Make sure you are selecting "mss-keycloak" as shown here:

+

OpenShift Login with KeyCloak

+

Once you successfully authenticate you should see the NERC OpenShift Web Console +as shown below:

+

OpenShift Web Console

+

After logging in to the NERC OpenShift console, access the NERC's Red Hat OpenShift +AI dashboard by clicking the application launcher icon (the black-and-white +icon that looks like a grid), located on the header as shown below:

+

The NERC RHOAI Link

+

OpenShift AI uses the same credentials as OpenShift for the dashboard, notebooks, +and all other components. When prompted, log in to the OpenShift AI dashboard by +using your OpenShift credentials by clicking "Log In With OpenShift" button +as shown below:

+

Log In With OpenShift

+

After the NERC OpenShift AI dashboard launches, it displays all currently enabled +applications.

+

The NERC RHOAI Dashboard

+

You can return to OpenShift Web Console by using the application launcher icon +(the black-and-white icon that looks like a grid), and choosing the "OpenShift +Console" as shown below:

+

The NERC OpenShift Web Console Link

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift-ai/logging-in/images/CILogon_interface.png b/openshift-ai/logging-in/images/CILogon_interface.png new file mode 100644 index 00000000..fd1c073f Binary files /dev/null and b/openshift-ai/logging-in/images/CILogon_interface.png differ diff --git a/openshift-ai/logging-in/images/authorize-access-to-the-rhoai.png b/openshift-ai/logging-in/images/authorize-access-to-the-rhoai.png new file mode 100644 index 00000000..97626616 Binary files /dev/null and b/openshift-ai/logging-in/images/authorize-access-to-the-rhoai.png differ diff --git a/openshift-ai/logging-in/images/log_in_with_openshift.png b/openshift-ai/logging-in/images/log_in_with_openshift.png new file mode 100644 index 00000000..9a2b73e6 Binary files /dev/null and b/openshift-ai/logging-in/images/log_in_with_openshift.png differ diff --git a/openshift-ai/logging-in/images/openshift-web-console.png b/openshift-ai/logging-in/images/openshift-web-console.png new file mode 100644 index 00000000..3a35b98a Binary files /dev/null and b/openshift-ai/logging-in/images/openshift-web-console.png differ diff --git a/openshift-ai/logging-in/images/openshift_login.png b/openshift-ai/logging-in/images/openshift_login.png new file mode 100644 index 00000000..025ab7d0 Binary files /dev/null and b/openshift-ai/logging-in/images/openshift_login.png differ diff --git a/openshift-ai/logging-in/images/the-nerc-openshift-web-console-link.png b/openshift-ai/logging-in/images/the-nerc-openshift-web-console-link.png new file mode 100644 index 00000000..1debfb1a Binary files /dev/null and b/openshift-ai/logging-in/images/the-nerc-openshift-web-console-link.png differ diff --git a/openshift-ai/logging-in/images/the-rhoai-dashboard.png b/openshift-ai/logging-in/images/the-rhoai-dashboard.png new file mode 100644 index 00000000..418c0a37 Binary files /dev/null and b/openshift-ai/logging-in/images/the-rhoai-dashboard.png differ diff --git a/openshift-ai/logging-in/images/the-rhoai-link.png b/openshift-ai/logging-in/images/the-rhoai-link.png new file mode 100644 index 00000000..c1603588 Binary files /dev/null and b/openshift-ai/logging-in/images/the-rhoai-link.png differ diff --git a/openshift-ai/logging-in/the-rhoai-dashboard-overview/index.html b/openshift-ai/logging-in/the-rhoai-dashboard-overview/index.html new file mode 100644 index 00000000..6b2cec58 --- /dev/null +++ b/openshift-ai/logging-in/the-rhoai-dashboard-overview/index.html @@ -0,0 +1,4499 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

The NERC's OpenShift AI dashboard Overview

+

In the NERC's RHOAI dashboard, you can see multiple links on the left-hand side.

+
    +
  1. +

    Applications:

    +
      +
    • +

      Enabled: Launch your enabled applications, view documentation, or get + started with quick start instructions and tasks.

      +
    • +
    • +

      Explore: View optional applications for your RHOAI instance.

      +

      NOTE: Most of them are disabled by default on NERC RHOAI right now.

      +
    • +
    +
  2. +
  3. +

    Data Science Projects: View your existing projects. This lists the projects corresponding to your NERC-OCP (OpenShift) resource allocations. Here, you can choose the specific project for the allocation in which you want to work. Within these projects, you can create workbenches, deploy various development environments (such as Jupyter Notebooks, VS Code, RStudio, etc.), add data connections, or serve models.

    +
    +

    What are Workbenches?

    +

    Workbenches are development environments. They can be based on JupyterLab, +but also on other types of IDEs, like VS Code or RStudio. You can create +as many workbenches as you want, and they can run concurrently.

    +
    +
  4. +
  5. +

    Data Science Pipelines:

    +
      +
    • +

      Pipelines: Manage your pipelines for a specific project selected from the + dropdown menu.

      +
    • +
    • +

      Runs: Manage and view your runs for a specific project selected from the + dropdown menu.

      +
    • +
    +
  6. +
  7. +

    Model Serving: Manage and view the health and performance of your deployed + models across different projects corresponding to your NERC-OCP (OpenShift) + resource allocations. Also, you can "Deploy Model" to a specific project selected + from the dropdown menu here.

    +
  8. +
  9. +

    Resources: Access learning resources, including various tutorials and demos, that help you onboard to the RHOAI platform.

    +
  10. +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift-ai/other-projects/configure-jupyter-notebook-use-gpus-aiml-modeling/index.html b/openshift-ai/other-projects/configure-jupyter-notebook-use-gpus-aiml-modeling/index.html new file mode 100644 index 00000000..19540a15 --- /dev/null +++ b/openshift-ai/other-projects/configure-jupyter-notebook-use-gpus-aiml-modeling/index.html @@ -0,0 +1,4833 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Configure a Jupyter notebook to use GPUs for AI/ML modeling

+

Prerequisites:

+

To prepare your Jupyter notebook server for using a GPU, you need to have:

+ +

Please ensure that you start your Jupyter notebook server with options as depicted +in the following configuration screen. This screen provides you with the opportunity +to select a notebook image and configure its options, including the number of GPUs.

+

PyTorch Workbench Information

+

For our example project, let's name it "PyTorch Workbench". We'll select the PyTorch image, choose a Deployment size of Small, set the Number of GPUs to 1, and allocate Cluster storage space of 1GB.

+

If this procedure is successful, you have started your Jupyter notebook server. +When your workbench is ready, the status will change to Running and you can select +"Open" to go to your environment:

+

Open JupyterLab Environment

+

Once you successfully authenticate, you should see the NERC RHOAI JupyterLab Web Interface as shown below:

+

RHOAI JupyterLab Web Interface

+

It's pretty empty right now, though. On the left side of the navigation pane, +locate the Name explorer panel. This panel is where you can create and manage +your project directories.

+

Clone a GitHub Repository

+

You can clone a Git repository in JupyterLab through the left-hand toolbar or +the Git menu option in the main menu as shown below:

+

JupyterLab Toolbar and Menu

+

Let's clone a repository using the left-hand toolbar. Click on the Git icon, +shown in below:

+

JupyterLab Git

+

Then click on Clone a Repository as shown below:

+

JupyterLab Git Actions

+

Enter the Git repository URL, which points to the getting-started-with-gpus demo project, i.e. https://github.com/rh-aiservices-bu/getting-started-with-gpus.

+

Then click the Clone button as shown below:

+

Getting Started With GPUs Example Project

+

Cloning takes a few seconds, after which you can double-click and navigate to the +newly-created folder i.e. getting-started-with-gpus that contains your cloned +Git repository.

+

You will be able to find the newly-created folder named getting-started-with-gpus +based on the Git repository name, as shown below:

+

Git Clone Repo Folder on NERC RHOAI

+

Exploring the getting-started-with-gpus repository contents

+

After you've cloned your repository, the getting-started-with-gpus repository contents appear in a directory under the Name pane. The directory contains several notebooks as .ipynb files, along with a standard license and README file, as shown below:

+

Content of The Repository

+

Double-click the torch-use-gpu.ipynb file to open this notebook.

+

This notebook handles the following tasks:

+
    +
  1. +

    Importing torch libraries (utilities).

    +
  2. +
  3. +

    Listing available GPUs.

    +
  4. +
  5. +

    Checking that GPUs are enabled.

    +
  6. +
  7. +

    Assigning a GPU device and retrieving the GPU name.

    +
  8. +
  9. +

    Loading vectors, matrices, and data onto a GPU.

    +
  10. +
  11. +

    Loading a neural network model onto a GPU.

    +
  12. +
  13. +

    Training the neural network model.

    +
  14. +
+

Start by importing the various torch and torchvision utilities:

+
import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.utils.data import TensorDataset
+import torch.optim as optim
+import torchvision
+from torchvision import datasets
+import torchvision.transforms as transforms
+import matplotlib.pyplot as plt
+from tqdm import tqdm
+
+

Once the utilities are loaded, determine how many GPUs are available:

+
torch.cuda.is_available() # Do we have a GPU? Should return True.
+
+
torch.cuda.device_count()  # How many GPUs do we have access to?
+
+

When you have confirmed that a GPU device is available for use, assign a GPU device +and retrieve the GPU name:

+
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+print(device)  # Check which device we got
+
+
torch.cuda.get_device_name(0)
+
+

Once you have assigned the first GPU device to your device variable, you are ready +to work with the GPU. Let's start working with the GPU by loading vectors, matrices, +and data:

+
X_train = torch.IntTensor([0, 30, 50, 75, 70])  # Initialize a Tensor of Integers with no device specified
+print(X_train.is_cuda, ",", X_train.device)  # Check which device Tensor is created on
+
+
# Move the Tensor to the device we want to use
+X_train = X_train.cuda()
+# Alternative method: specify the device using the variable
+# X_train = X_train.to(device)
+# Confirm that the Tensor is on the GPU now
+print(X_train.is_cuda, ",", X_train.device)
+
+
# Alternative method: Initialize the Tensor directly on a specific device.
+X_test = torch.cuda.IntTensor([30, 40, 50], device=device)
+print(X_test.is_cuda, ",", X_test.device)
+
+

After you have loaded vectors, matrices, and data onto a GPU, load a neural network +model:

+
# Here is a basic fully connected neural network built in Torch.
+# If we want to load it / train it on our GPU, we must first put it on the GPU
+# Otherwise it will remain on CPU by default.
+
+batch_size = 100
+
+class SimpleNet(nn.Module):
+    def __init__(self):
+        super(SimpleNet, self).__init__()
+        self.fc1 = nn.Linear(784, 784)
+        self.fc2 = nn.Linear(784, 10)
+
+    def forward(self, x):
+        x = x.view(batch_size, -1)
+        x = self.fc1(x)
+        x = F.relu(x)
+        x = self.fc2(x)
+        output = F.softmax(x, dim=1)
+        return output
+
+
model = SimpleNet().to(device)  # Load the neural network model onto the GPU
+
+

After the model has been loaded onto the GPU, train it on a data set. For this +example, we will use the FashionMNIST data set:

+
"""
+    Data loading, train and test set via the PyTorch dataloader.
+"""
+# Transform our data into Tensors to normalize the data
+train_transform=transforms.Compose([
+        transforms.ToTensor(),
+        transforms.Normalize((0.1307,), (0.3081,))
+        ])
+
+test_transform=transforms.Compose([
+        transforms.ToTensor(),
+        transforms.Normalize((0.1307,), (0.3081,)),
+        ])
+
+# Set up a training data set
+trainset = datasets.FashionMNIST('./data', train=True, download=True,
+                  transform=train_transform)
+train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
+                                          shuffle=False, num_workers=2)
+
+# Set up a test data set
+testset = datasets.FashionMNIST('./data', train=False,
+                  transform=test_transform)
+test_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
+                                        shuffle=False, num_workers=2)
+
+

Once the FashionMNIST data set has been downloaded, you can take a look at the +dictionary and sample its content.

+
# A dictionary to map our class numbers to their items.
+labels_map = {
+    0: "T-Shirt",
+    1: "Trouser",
+    2: "Pullover",
+    3: "Dress",
+    4: "Coat",
+    5: "Sandal",
+    6: "Shirt",
+    7: "Sneaker",
+    8: "Bag",
+    9: "Ankle Boot",
+}
+
+# Plotting 9 random different items from the training data set, trainset.
+figure = plt.figure(figsize=(8, 8))
+for i in range(1, 3 * 3 + 1):
+    sample_idx = torch.randint(len(trainset), size=(1,)).item()
+    img, label = trainset[sample_idx]
+    figure.add_subplot(3, 3, i)
+    plt.title(labels_map[label])
+    plt.axis("off")
+    plt.imshow(img.view(28,28), cmap="gray")
+plt.show()
+
+

The following figure shows a few of the data set's pictures:

+

Downloaded FashionMNIST Data Set

+

There are ten classes of fashion items (e.g. shirt, shoes, and so on). Our goal +is to identify which class each picture falls into. Now you can train the model +and determine how well it classifies the items:

+
def train(model, device, train_loader, optimizer, epoch):
+    """Model training function"""
+    model.train()
+    print(device)
+    for batch_idx, (data, target) in tqdm(enumerate(train_loader)):
+        data, target = data.to(device), target.to(device)
+        optimizer.zero_grad()
+        output = model(data)
+        loss = F.nll_loss(output, target)
+        loss.backward()
+        optimizer.step()
+
+
def test(model, device, test_loader):
+    """Model evaluating function"""
+    model.eval()
+    test_loss = 0
+    correct = 0
+    # Use the no_grad method to increase computation speed
+    # since computing the gradient is not necessary in this step.
+    with torch.no_grad():
+        for data, target in test_loader:
+            data, target = data.to(device), target.to(device)
+            output = model(data)
+            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
+            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
+            correct += pred.eq(target.view_as(pred)).sum().item()
+
+    test_loss /= len(test_loader.dataset)
+
+    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
+        test_loss, correct, len(test_loader.dataset),
+        100. * correct / len(test_loader.dataset)))
+
+
# number of  training 'epochs'
+EPOCHS = 5
+# our optimization strategy used in training.
+optimizer = optim.Adadelta(model.parameters(), lr=0.01)
+
+
for epoch in range(1, EPOCHS + 1):
+        print( f"EPOCH: {epoch}")
+        train(model, device, train_loader, optimizer, epoch)
+        test(model, device, test_loader)
+
+

As the model is trained, you can follow along as its accuracy increases from 63 +to 72 percent. (Your accuracies might differ, because accuracy can depend on the +random initialization of weights.)
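If you want runs to be more repeatable, you can seed PyTorch's random number generators before building and training the model. This is a minimal sketch, not part of the original notebook, and full determinism may require additional settings (for example, cuDNN flags) beyond this example:

import torch

# Seed the CPU and all GPU random number generators before creating the model,
# so weight initialization (and therefore accuracy) is more repeatable.
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)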

+

Once the model is trained, save it locally:

+
# Saving the model's weights!
+torch.save(model.state_dict(), "mnist_fashion_SimpleNet.pt")
+
+

Load and run a PyTorch model

+

Let's now determine how our simple torch model performs using GPU resources.

+

In the getting-started-with-gpus directory, double-click the torch-test-model.ipynb file (highlighted as shown below) to open the notebook.

+

Content of Torch Test Model Notebook

+

After importing the torch and torchvision utilities, assign the first GPU to +your device variable. Prepare to import your trained model, then place the model +on your GPU and load in its trained weights:

+
import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torchvision import datasets
+import torchvision.transforms as transforms
+import matplotlib.pyplot as plt
+
+
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+print(device)  # let's see what device we got
+
+
# Getting set to import our trained model.
+
+# batch size of 1 so we can look at one image at time.
+batch_size = 1
+
+
+class SimpleNet(nn.Module):
+    def __init__(self):
+        super(SimpleNet, self).__init__()
+        self.fc1 = nn.Linear(784, 784)
+        self.fc2 = nn.Linear(784, 10)
+
+    def forward(self, x):
+        x = x.view(batch_size, -1)
+        x = self.fc1(x)
+        x = F.relu(x)
+        x = self.fc2(x)
+        output = F.softmax(x, dim=1)
+        return output
+
+
model = SimpleNet().to( device )
+model.load_state_dict( torch.load("mnist_fashion_SimpleNet.pt") )
+
+

You are now ready to examine some data and determine how your model performs. The sample run shown below indicates that the model predicted a "bag" with a confidence of about 0.9192. Although the output is formatted with a % sign, the value is on a 0-to-1 scale, so 0.9192 is very good because a perfect confidence would be 1.0.
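As a rough illustration of where such a number comes from, the following sketch (not the notebook's exact cell) runs a single test image through the trained model and prints the predicted class with its confidence. It assumes a FashionMNIST test_loader built with batch_size=1 and the labels_map dictionary from the earlier notebook:

with torch.no_grad():
    img, label = next(iter(test_loader))      # one image, since batch_size = 1
    img = img.to(device)
    probs = model(img)                        # SimpleNet's forward already applies softmax
    confidence, predicted = probs.max(dim=1)  # highest probability and its class index
    print(f"Predicted: {labels_map[predicted.item()]} "
          f"(confidence {confidence.item():.4f}), actual: {labels_map[label.item()]}")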

+

Model Performance Test Result

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift-ai/other-projects/how-access-s3-data-then-download-and-analyze-it/index.html b/openshift-ai/other-projects/how-access-s3-data-then-download-and-analyze-it/index.html new file mode 100644 index 00000000..9ef62ff6 --- /dev/null +++ b/openshift-ai/other-projects/how-access-s3-data-then-download-and-analyze-it/index.html @@ -0,0 +1,4676 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

How to access, download, and analyze data for S3 usage

+

Prerequisites:

+

To prepare your Jupyter notebook server, you need to have:

+ +

Please ensure that you start your Jupyter notebook server with options as depicted +in the following configuration screen. This screen provides you with the opportunity +to select a notebook image and configure its options, including the number of GPUs.

+

Standard Data Science Workbench Information

+

For our example project, let's name it "Standard Data Science Workbench". We'll select the Standard Data Science image, choose a Deployment size of Small, set the Number of GPUs to 0, and allocate Cluster storage space of 1GB.

+

If this procedure is successful, you have started your Jupyter notebook server. +When your workbench is ready, the status will change to Running and you can select +"Open" to go to your environment:

+

Open JupyterLab Environment

+

Once you successfully authenticate, you should see the NERC RHOAI JupyterLab Web Interface as shown below:

+

RHOAI JupyterLab Web Interface

+

It's pretty empty right now, though. On the left side of the navigation pane, +locate the Name explorer panel. This panel is where you can create and manage +your project directories.

+

Clone a GitHub Repository

+

You can clone a Git repository in JupyterLab through the left-hand toolbar or +the Git menu option in the main menu as shown below:

+

JupyterLab Toolbar and Menu

+

Let's clone a repository using the left-hand toolbar. Click on the Git icon, +shown in below:

+

JupyterLab Git

+

Then click on Clone a Repository as shown below:

+

JupyterLab Git Actions

+

Enter the Git repository URL, which points to the access-s3-data demo project, i.e. https://github.com/rh-aiservices-bu/access-s3-data.

+

Then click the Clone button as shown below:

+

Access, Download and Analysis Example Project

+

Cloning takes a few seconds, after which you can double-click and navigate to the +newly-created folder i.e. access-s3-data that contains your cloned Git repository.

+

You will be able to find the newly-created folder named access-s3-data based on +the Git repository name, as shown below:

+

Git Clone Repo Folder on NERC RHOAI

+

Access and download S3 data

+

In the Name menu, double-click the downloadData.ipynb notebook in the file +explorer on the left side to launch it. This action will open another tab in the +content section of the environment, on the right.

+

Run each cell in the notebook, using the Shift-Enter key combination, and pay attention to the execution results. Using this notebook, we will do the following (a rough sketch of these steps appears after the list):

+
    +
  • +

    Make a connection to an AWS S3 storage bucket

    +
  • +
  • +

    Download a CSV file into the "datasets" folder

    +
  • +
  • +

    Rename the downloaded CSV file to "newtruckdata.csv"

    +
  • +
+
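For orientation, the three steps above might look roughly like the following sketch using the boto3 library. The bucket name, object key, and credential handling shown here are placeholders for illustration, not the notebook's actual values:

import os
import boto3

bucket_name = "example-public-bucket"          # hypothetical bucket name
object_key = "data/truckdata.csv"              # hypothetical object key

s3 = boto3.client("s3")                        # credentials come from the environment
os.makedirs("datasets", exist_ok=True)

# Download the CSV into the "datasets" folder, then rename it.
s3.download_file(bucket_name, object_key, "datasets/truckdata.csv")
os.rename("datasets/truckdata.csv", "datasets/newtruckdata.csv")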

View your new CSV file

+

Inside the "datasets" directory, double-click the "newtruckdata.csv" file. File +contents should appear as shown below:

+

New Truck Data CSV File Content

+

The file contains the data you will analyze and perform analytics on.

+

Getting ready to run analysis on your new CSV file

+

Since you now have data, you can open the next Jupyter notebook, simpleCalc.ipynb, and perform the following operations (a minimal sketch follows the list below):

+
    +
  • +

    Create a dataframe.

    +
  • +
  • +

    Perform simple total and average calculations.

    +
  • +
  • +

    Print the calculation results.

    +
  • +
+
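The calculations themselves might look roughly like this pandas sketch; the column name mileage is an assumption for illustration, so check the CSV header for the actual name:

import pandas as pd

# Create a dataframe from the downloaded CSV.
df = pd.read_csv("datasets/newtruckdata.csv")

# Simple total and average calculations over a (hypothetical) "mileage" column.
total_mileage = df["mileage"].sum()
vehicle_count = len(df)
average_mileage = df["mileage"].mean()

# Print the calculation results.
print(f"Total mileage: {total_mileage} across {vehicle_count} vehicles")
print(f"Average mileage: {average_mileage}")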

Analyzing your S3 data access run results

+

Double-click the simpleCalc.ipynb Python file. When you execute the cells in the +notebook, results appear like the ones shown below:

+

Simple Calculation Results

+

The cells in the above figure show the mileage of four vehicles. In the next cell, +we calculate total mileage, total rows (number of vehicles) and the average mileage +for all vehicles. Execute the "Perform Calculations" cell to see basic calculations +performed on the data as shown below:

+

Perform Calculation Results

+

Calculations show the total mileage as 742, for four vehicles, and an average +mileage of 185.5.

+

Success! You have accessed and analyzed your run results using the NERC RHOAI.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift-ai/other-projects/images/access-download-and-analysis-s3-data-git-repo.png b/openshift-ai/other-projects/images/access-download-and-analysis-s3-data-git-repo.png new file mode 100644 index 00000000..8d4b3f36 Binary files /dev/null and b/openshift-ai/other-projects/images/access-download-and-analysis-s3-data-git-repo.png differ diff --git a/openshift-ai/other-projects/images/authorize-access-to-the-rhoai.png b/openshift-ai/other-projects/images/authorize-access-to-the-rhoai.png new file mode 100644 index 00000000..97626616 Binary files /dev/null and b/openshift-ai/other-projects/images/authorize-access-to-the-rhoai.png differ diff --git a/openshift-ai/other-projects/images/downloaded-FashionMNIST-data-set.png b/openshift-ai/other-projects/images/downloaded-FashionMNIST-data-set.png new file mode 100644 index 00000000..5c1aef5c Binary files /dev/null and b/openshift-ai/other-projects/images/downloaded-FashionMNIST-data-set.png differ diff --git a/openshift-ai/other-projects/images/getting-started-with-gpus-git-repo.png b/openshift-ai/other-projects/images/getting-started-with-gpus-git-repo.png new file mode 100644 index 00000000..570a15d3 Binary files /dev/null and b/openshift-ai/other-projects/images/getting-started-with-gpus-git-repo.png differ diff --git a/openshift-ai/other-projects/images/git-repo-content.png b/openshift-ai/other-projects/images/git-repo-content.png new file mode 100644 index 00000000..383741fd Binary files /dev/null and b/openshift-ai/other-projects/images/git-repo-content.png differ diff --git a/openshift-ai/other-projects/images/gpu_rhoai.png b/openshift-ai/other-projects/images/gpu_rhoai.png new file mode 100644 index 00000000..103af6fc Binary files /dev/null and b/openshift-ai/other-projects/images/gpu_rhoai.png differ diff --git a/openshift-ai/other-projects/images/jupyterlab-toolbar-main-menu.jpg b/openshift-ai/other-projects/images/jupyterlab-toolbar-main-menu.jpg new file mode 100644 index 00000000..f09dce1c Binary files /dev/null and b/openshift-ai/other-projects/images/jupyterlab-toolbar-main-menu.jpg differ diff --git a/openshift-ai/other-projects/images/jupyterlab_git.png b/openshift-ai/other-projects/images/jupyterlab_git.png new file mode 100644 index 00000000..e1874ba0 Binary files /dev/null and b/openshift-ai/other-projects/images/jupyterlab_git.png differ diff --git a/openshift-ai/other-projects/images/jupyterlab_git_actions.png b/openshift-ai/other-projects/images/jupyterlab_git_actions.png new file mode 100644 index 00000000..dc958348 Binary files /dev/null and b/openshift-ai/other-projects/images/jupyterlab_git_actions.png differ diff --git a/openshift-ai/other-projects/images/jupyterlab_web_interface.png b/openshift-ai/other-projects/images/jupyterlab_web_interface.png new file mode 100644 index 00000000..1c5f0a87 Binary files /dev/null and b/openshift-ai/other-projects/images/jupyterlab_web_interface.png differ diff --git a/openshift-ai/other-projects/images/model-performance-result.png b/openshift-ai/other-projects/images/model-performance-result.png new file mode 100644 index 00000000..a7ba2aae Binary files /dev/null and b/openshift-ai/other-projects/images/model-performance-result.png differ diff --git a/openshift-ai/other-projects/images/newtruckdata.jpg b/openshift-ai/other-projects/images/newtruckdata.jpg new file mode 100644 index 00000000..0ba24289 Binary files /dev/null and b/openshift-ai/other-projects/images/newtruckdata.jpg differ diff --git 
a/openshift-ai/other-projects/images/open-pytorch-jupyter-lab.png b/openshift-ai/other-projects/images/open-pytorch-jupyter-lab.png new file mode 100644 index 00000000..fd5ea21f Binary files /dev/null and b/openshift-ai/other-projects/images/open-pytorch-jupyter-lab.png differ diff --git a/openshift-ai/other-projects/images/open-standard-ds-workbench-jupyter-lab.png b/openshift-ai/other-projects/images/open-standard-ds-workbench-jupyter-lab.png new file mode 100644 index 00000000..b1d1e2cc Binary files /dev/null and b/openshift-ai/other-projects/images/open-standard-ds-workbench-jupyter-lab.png differ diff --git a/openshift-ai/other-projects/images/perform_calculation_results.jpg b/openshift-ai/other-projects/images/perform_calculation_results.jpg new file mode 100644 index 00000000..9b2332cf Binary files /dev/null and b/openshift-ai/other-projects/images/perform_calculation_results.jpg differ diff --git a/openshift-ai/other-projects/images/pytorch-workbench.png b/openshift-ai/other-projects/images/pytorch-workbench.png new file mode 100644 index 00000000..7c9d0af6 Binary files /dev/null and b/openshift-ai/other-projects/images/pytorch-workbench.png differ diff --git a/openshift-ai/other-projects/images/rhoai-git-cloned-repo.jpg b/openshift-ai/other-projects/images/rhoai-git-cloned-repo.jpg new file mode 100644 index 00000000..9f668967 Binary files /dev/null and b/openshift-ai/other-projects/images/rhoai-git-cloned-repo.jpg differ diff --git a/openshift-ai/other-projects/images/rhoai-git-cloned-repo.png b/openshift-ai/other-projects/images/rhoai-git-cloned-repo.png new file mode 100644 index 00000000..5bb77c9a Binary files /dev/null and b/openshift-ai/other-projects/images/rhoai-git-cloned-repo.png differ diff --git a/openshift-ai/other-projects/images/running-simple-calculation.jpg b/openshift-ai/other-projects/images/running-simple-calculation.jpg new file mode 100644 index 00000000..308dcc20 Binary files /dev/null and b/openshift-ai/other-projects/images/running-simple-calculation.jpg differ diff --git a/openshift-ai/other-projects/images/simple-calculation-results.png b/openshift-ai/other-projects/images/simple-calculation-results.png new file mode 100644 index 00000000..ffdd49f1 Binary files /dev/null and b/openshift-ai/other-projects/images/simple-calculation-results.png differ diff --git a/openshift-ai/other-projects/images/standard-data-science-workbench.png b/openshift-ai/other-projects/images/standard-data-science-workbench.png new file mode 100644 index 00000000..fee199ba Binary files /dev/null and b/openshift-ai/other-projects/images/standard-data-science-workbench.png differ diff --git a/openshift-ai/other-projects/images/torch-test-model-notebook-content.png b/openshift-ai/other-projects/images/torch-test-model-notebook-content.png new file mode 100644 index 00000000..6891806c Binary files /dev/null and b/openshift-ai/other-projects/images/torch-test-model-notebook-content.png differ diff --git a/openshift/applications/creating-a-sample-application/index.html b/openshift/applications/creating-a-sample-application/index.html new file mode 100644 index 00000000..cdfafa57 --- /dev/null +++ b/openshift/applications/creating-a-sample-application/index.html @@ -0,0 +1,4805 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Creating A Sample Application

+

NERC's OpenShift service is a platform that provides a cloud-native environment +for developing and deploying applications.

+

Here, we walk through the process of creating a simple web application and deploying it. This example uses the Node.js programming language, but the process with other programming languages will be similar. The instructions provided show how to perform the tasks using both the web console and the command-line tool.

+

Using the Developer perspective on NERC's OpenShift Web Console

+
    +
  1. +

    Go to the NERC's OpenShift Web Console.

    +
  2. +
  3. +

    Click on the Perspective Switcher drop-down menu and select Developer.

    +
  4. +
  5. +

    In the Navigation Menu, click +Add.

    +
  6. +
  7. +

    Creating applications using samples: Use existing code samples to get started with creating applications on the OpenShift Container Platform. Find the Create applications using samples section, click "View all samples", and then select the type of application you want to create (e.g. Node.js, Python, Ruby, etc.). The sample loads the application from a Git Repo URL; review or modify the application Name for your application. Alternatively, if you want to create an application from your own source code located in a Git repository, select Import from Git. In the Git Repo URL text box, enter your Git repo URL, for example: https://github.com/myuser/mypublicrepo.git. You may see a warning stating "URL is valid but cannot be reached". You can ignore this warning!

    +
  8. +
  9. +

    Click "Create" to create your application.

    +
  10. +
  11. +

    Once your application has been created, you can view the details by clicking + on the application name in the Project Overview page.

    +
  12. +
  13. +

    On the Topology View menu, click on your application, or the application + circle if you are in graphical topology view. In the details panel that displays, + scroll to the Routes section on the Resources tab and click on the link to + go to the sample application. This will open your application in a new browser + window. The link will look similar to http://<appname>-<mynamespace>.apps.shift.nerc.mghpcc.org.

    +
  14. +
+
+

Example: Deploying a Python application

+

For a quick example on how to use the "Import from Git" option to deploy a +sample Python application, please refer to this guide.

+
+

Additional resources

+

For more options and customization please read this.

+

Using the CLI (oc command) on your local terminal

+

Alternatively, you can create an application on the NERC's OpenShift cluster by +using the oc new-app command from the command line terminal.

+

i. Make sure you have the oc CLI tool installed and configured on your local +machine following these steps.

+
+

Information

+

Some users may have access to multiple projects. Run the following command to +switch to a specific project space: oc project <your-project-namespace>.

+
+

ii. To create an application, you will need to specify the language and runtime +for your application. You can do this by using the oc new-app command and specifying +a language and runtime. For example, to create a Node.js application, you can run +the following command: +oc new-app nodejs

+

iii. If you want to create an application from an existing Git repository, you can use the --code flag to specify the URL of the repository. For example: oc new-app --code https://github.com/myuser/mypublicrepo. If you want to use a different name, you can add the --name=<newname> argument to the oc new-app command. For example: oc new-app --name=mytestapp https://github.com/myuser/mypublicrepo. The platform will try to automatically detect the programming language of the application code and select the latest version of the base language image available. If oc new-app can't find a suitable Source-To-Image (S2I) builder image based on the source code in your Git repository, is unable to detect the programming language, or detects the wrong one, you can always specify the image you want to use as part of the new-app argument, with oc new-app <image url>~<git url>. For example, for a test application based on Node.js, we could use the same command as before but add nodejs~ before the URL of the Git repository: oc new-app nodejs~https://github.com/myuser/mypublicrepo.

+
+

Important Note

+

If you are using a private remote Git repository, you can use the +--source-secret flag to specify an existing source clone secret that +will get injected into your BuildConfig to access the repository. +For example: oc new-app https://github.com/myuser/yourprivaterepo --source-secret=yoursecret.

+
+

iv. Once your application has been created, you can run oc status to see if your application was successfully built and deployed. Builds and deployments can sometimes take several minutes to complete, so you may need to run this several times. You can view the details by running the oc get pods command. This will show you a list of all the pods running in your project, including the pod for your new application.

+

v. When using the oc command-line tool to create an application, a route is not +automatically set up to make your application web accessible. Run the following +to make the test application web accessible: +oc create route edge --service=mytestapp --insecure-policy=Redirect. +Once the application is deployed and the route is set up, it can be accessed at +a web URL similar to http://mytestapp-<mynamespace>.apps.shift.nerc.mghpcc.org.

+

For more additional resources

+

For more options and customization please read this.

+

Using the Developer Catalog on NERC's OpenShift Web Console

+

The Developer Catalog offers a streamlined process for deploying applications +and services supported by Operator-backed services like CI/CD, Databases, Builder +Images, and Helm Charts. It comprises a diverse array of application components, +services, event sources, and source-to-image builders ready for integration into +your project.

+
+

About Quick Start Templates

+

By default, the templates build using a public source repository on GitHub that +contains the necessary application code. For more options and customization +please read this.

+
+

Steps

+
    +
  1. +

    Go to the NERC's OpenShift Web Console.

    +
  2. +
  3. +

    Click on the Perspective Switcher drop-down menu and select Developer.

    +
  4. +
  5. +

    In the Navigation Menu, click +Add.

    +
  6. +
  7. +

    You need to find the Developer Catalog section and then select the All services option as shown below:

    +

    Select All Services

    +
  8. +
  9. +

    Then, you can search for any available service in the Developer Catalog templates and choose the desired type of service or component that you wish to include in your project. For this example, select Databases to list all the database services and then click MariaDB to see the details for the service.

    +

    Search for MariaDB

    +
    +

    To Create Your Own Developer Catalog Service

    +

    You also have the option to create and integrate custom services into the +Developer Catalog using a template, as described here.

    +
    +
  10. +
  11. +

    Once selected by clicking the template, you will see Instantiate Template web + interface as shown below:

    +

    Initiate MariaDB Template

    +
  12. +
  13. +

    Clicking "Instantiate Template" will display an automatically populated + template containing details for the MariaDB service. Click "Create" to begin + the creation process and enter any custom information required.

    +
  14. +
  15. +

    View the MariaDB service in the Topology view as shown below:

    +

    MariaDB in Topology

    +
  16. +
+

For Additional resources

+

For more options and customization please read this.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift/applications/creating-your-own-developer-catalog-service/index.html b/openshift/applications/creating-your-own-developer-catalog-service/index.html new file mode 100644 index 00000000..3ffca0b2 --- /dev/null +++ b/openshift/applications/creating-your-own-developer-catalog-service/index.html @@ -0,0 +1,4512 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Creating Your Own Developer Catalog Service

+

Here, we walk through the process of creating a simple RStudio web server template that bundles all resources required to run the server (i.e. ConfigMap, Pod, Route, Service, etc.) and then initiating and deploying an application from that template.

+

This example template file is readily accessible from the +Git Repository.

+
+

More about Writing Templates

+

For more options and customization please read this.

+
+
    +
  1. +

    Find the From Local Machine section and click on Import YAML as shown + below:

    +

    Import YAML

    +
  2. +
  3. +

    In the opened YAML editor, paste the contents of the template copied from the rstudio-server-template.yaml file located in the provided Git repo.

    +

    YAML Editor

    +
  4. +
  5. +

    You need to find the Developer Catalog section and then select the All services option as shown below:

    +

    Select All Services

    +
  6. +
  7. +

    Then, you will be able to use the created Developer Catalog template by searching for "RStudio" in the catalog as shown below:

    +

    Search for RStudio Template

    +
  8. +
  9. +

    Once selected by clicking the template, you will see Instantiate Template web + interface as shown below:

    +

    Initiate Template

    +
  10. +
  11. +

    Based on our template definition, users are asked to input a preferred password for the RStudio server, so the following interface will prompt for the password that will be used to log in to the RStudio server.

    +

    Provide the RStudio Password

    +
  12. +
  13. +

    Once successfully initiated, you can either open the application URL using the Open URL icon as shown below, or navigate to the Routes section and click on the Location path as shown below:

    +

    How to get the RStudio Application URL

    +
  14. +
  15. +

    To get the Username to be used to log in to the RStudio server, you need to click on the running pod, i.e. rstudio-server, as shown below:

    +

    Detail Information for RStudio Pod

    +
  16. +
  17. +

    Then select the YAML section to find the value of the runAsUser attribute, which is used as the Username when signing in to the RStudio server, as shown below:

    +

    Username for RStudio Server from Pod runAsUser

    +
  18. +
  19. +

    Finally, you will be able to see the RStudio web interface!

    +
  20. +
+
+

Modifying uploaded templates

+

You can edit a template that has already been uploaded to your project: +oc edit template <template>

+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift/applications/deleting-applications/index.html b/openshift/applications/deleting-applications/index.html new file mode 100644 index 00000000..b0af29b9 --- /dev/null +++ b/openshift/applications/deleting-applications/index.html @@ -0,0 +1,4602 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Deleting your applications

+

Deleting applications using the Developer perspective on NERC's OpenShift Web Console

+

You can delete applications created in your project by using the Developer perspective as follows:

+

To delete an application and all of its associated components using the +Topology view menu in the Developer perspective:

+
    +
  1. +

    Go to the NERC's OpenShift Web Console.

    +
  2. +
  3. +

    Click on the Perspective Switcher drop-down menu and select Developer.

    +
  4. +
  5. +

    Click the application you want to delete to see the side panel with + the resource details of the application.

    +
  6. +
  7. +

    Click the Actions drop-down menu displayed on the upper right of the panel, + and select Delete Application to see a confirmation dialog box as shown below:

    +

    Delete an application using Actions

    +
  8. +
  9. +

    Enter the name of the application and click Delete to delete it.

    +
  10. +
+

Or, if you are using the Graph view, you can also right-click the application you want to delete and click Delete Application to delete it, as shown below:

+

Delete an application using Context menu

+

Deleting applications using the oc command on your local terminal

+

Alternatively, you can delete the resource objects by using the +oc delete command from the command line terminal. Make sure you have the oc +CLI tool installed and configured on your local machine following these steps.

+
+

How to select resource object?

+

You can delete a single resource object by name, or delete a set of resource +objects by specifying a label selector.

+
+

When an application is deployed, resource objects for that application will typically have an app label applied to them, with a value corresponding to the name of the application. This can be used with a label selector to delete all resource objects for an application.

+

To test what resource objects would be deleted when using a label selector, use +the oc get command to query the set of objects which would be matched.

+

oc get all --selector app=<application-name> -o name

+

For example:

+
oc get all --selector app=rstudio-server -o name
+pod/rstudio-server
+service/rstudio-server
+route.route.openshift.io/rstudio-server
+
+

If you are satisfied that what is shown are the resource objects for your +application, then run oc delete.

+

oc delete all --selector app=<application-name>

+
+

Important Note

+

Selector all matches on a subset of all resource object types that exist. +It targets the core resource objects that would be created for a build and deployment. +It will not include resource objects such as persistent volume claims (pvc), +config maps (configmap), secrets (secret), and others.

+
+

You will either need to delete these resource objects separately, or, if they have also been labelled with the app label, list the resource object types along with all.

+

oc delete all,configmap,pvc,serviceaccount,rolebinding --selector app=<application-name>

+

If you are not sure what labels have been applied to resource objects for your +application, you can run oc describe on the resource object to see the labels +applied to it. For example:

+
oc describe pod/rstudio-server
+Name:         rstudio-server
+Namespace:    64b664c37f2a47c39c3cf3942ff4d0be
+Priority:     0
+Node:         wrk-11/10.30.6.21
+Start Time:   Fri, 16 Dec 2022 10:59:23 -0500
+Labels:       app=rstudio-server
+            template.openshift.io/template-instance-owner=44a3fae8-4e8e-4058-a4a8-0af7bbb41f6
+...
+
+
+

Important Note

+

It is important to check what labels have been used with your application if +you have created it using a template, as templates may not follow the convention +of using the app label.

+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift/applications/editing-applications/index.html b/openshift/applications/editing-applications/index.html new file mode 100644 index 00000000..312ef52a --- /dev/null +++ b/openshift/applications/editing-applications/index.html @@ -0,0 +1,4552 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Editing applications

+

You can edit the configuration and the source code of the application you create +using the Topology view.

+

Editing the source code of an application using the Developer perspective

+

You can click the "Edit Source Code" icon, displayed at the bottom-right of the +deployed application, to access your source code and modify it as shown below:

+

Edit the source code of an application

+
+

Information

+

This feature is available only when you create applications using the +From Git, Container Image, From Catalog, and From Dockerfile +options.

+
+

Editing the application configuration using the Developer perspective

+
    +
  1. +

    In the Topology view, right-click the application to see the edit options + available as shown below:

    +

    Edit an application

    +

    Or, In the Topology view, click the deployed application to reveal the +right-side Overview panel. From the Actions drop-down list, we can see +the similar edit options available as shown below:

    +

    Edit an application using Action

    +
  2. +
  3. +

    Click on any of the available options to edit the resources used by your application; the pop-up form will be pre-populated with the values you added while creating the application.

    +
  4. +
  5. +

    Click Save to restart the build and deploy a new image.

    +
  6. +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift/applications/images/add-hpa-popup.png b/openshift/applications/images/add-hpa-popup.png new file mode 100644 index 00000000..2913c790 Binary files /dev/null and b/openshift/applications/images/add-hpa-popup.png differ diff --git a/openshift/applications/images/compute_resources_pod_yaml.png b/openshift/applications/images/compute_resources_pod_yaml.png new file mode 100644 index 00000000..6d095f3e Binary files /dev/null and b/openshift/applications/images/compute_resources_pod_yaml.png differ diff --git a/openshift/applications/images/delete-application-using-actions.png b/openshift/applications/images/delete-application-using-actions.png new file mode 100644 index 00000000..19007896 Binary files /dev/null and b/openshift/applications/images/delete-application-using-actions.png differ diff --git a/openshift/applications/images/delete-application-using-right_click.png b/openshift/applications/images/delete-application-using-right_click.png new file mode 100644 index 00000000..af9c4765 Binary files /dev/null and b/openshift/applications/images/delete-application-using-right_click.png differ diff --git a/openshift/applications/images/edit-an-application-using-action.png b/openshift/applications/images/edit-an-application-using-action.png new file mode 100644 index 00000000..28c12cdb Binary files /dev/null and b/openshift/applications/images/edit-an-application-using-action.png differ diff --git a/openshift/applications/images/edit-an-application.png b/openshift/applications/images/edit-an-application.png new file mode 100644 index 00000000..844be16d Binary files /dev/null and b/openshift/applications/images/edit-an-application.png differ diff --git a/openshift/applications/images/edit-the-source-code-of-application.png b/openshift/applications/images/edit-the-source-code-of-application.png new file mode 100644 index 00000000..a372915e Binary files /dev/null and b/openshift/applications/images/edit-the-source-code-of-application.png differ diff --git a/openshift/applications/images/hpa-form.png b/openshift/applications/images/hpa-form.png new file mode 100644 index 00000000..b5058e73 Binary files /dev/null and b/openshift/applications/images/hpa-form.png differ diff --git a/openshift/applications/images/import-pod-yaml-content.png b/openshift/applications/images/import-pod-yaml-content.png new file mode 100644 index 00000000..f9f34e05 Binary files /dev/null and b/openshift/applications/images/import-pod-yaml-content.png differ diff --git a/openshift/applications/images/import-yaml-content.png b/openshift/applications/images/import-yaml-content.png new file mode 100644 index 00000000..00f4bba2 Binary files /dev/null and b/openshift/applications/images/import-yaml-content.png differ diff --git a/openshift/applications/images/import-yaml.png b/openshift/applications/images/import-yaml.png new file mode 100644 index 00000000..a8bc42f8 Binary files /dev/null and b/openshift/applications/images/import-yaml.png differ diff --git a/openshift/applications/images/initiate-mariadb-template.png b/openshift/applications/images/initiate-mariadb-template.png new file mode 100644 index 00000000..b9b85ead Binary files /dev/null and b/openshift/applications/images/initiate-mariadb-template.png differ diff --git a/openshift/applications/images/initiate-template.png b/openshift/applications/images/initiate-template.png new file mode 100644 index 00000000..d0f768ab Binary files /dev/null and 
b/openshift/applications/images/initiate-template.png differ diff --git a/openshift/applications/images/limit_ranges.png b/openshift/applications/images/limit_ranges.png new file mode 100644 index 00000000..9ef1648d Binary files /dev/null and b/openshift/applications/images/limit_ranges.png differ diff --git a/openshift/applications/images/mariadb-in-topology.png b/openshift/applications/images/mariadb-in-topology.png new file mode 100644 index 00000000..dc4cf9d6 Binary files /dev/null and b/openshift/applications/images/mariadb-in-topology.png differ diff --git a/openshift/applications/images/nvidia-A100-gpu.png b/openshift/applications/images/nvidia-A100-gpu.png new file mode 100644 index 00000000..8e143c5b Binary files /dev/null and b/openshift/applications/images/nvidia-A100-gpu.png differ diff --git a/openshift/applications/images/nvidia-V100-gpu.png b/openshift/applications/images/nvidia-V100-gpu.png new file mode 100644 index 00000000..84b5d04c Binary files /dev/null and b/openshift/applications/images/nvidia-V100-gpu.png differ diff --git a/openshift/applications/images/pod-object-definition-yaml.png b/openshift/applications/images/pod-object-definition-yaml.png new file mode 100644 index 00000000..78893520 Binary files /dev/null and b/openshift/applications/images/pod-object-definition-yaml.png differ diff --git a/openshift/applications/images/pod-scale-count-arrow.png b/openshift/applications/images/pod-scale-count-arrow.png new file mode 100644 index 00000000..0dc9138e Binary files /dev/null and b/openshift/applications/images/pod-scale-count-arrow.png differ diff --git a/openshift/applications/images/provide-password.png b/openshift/applications/images/provide-password.png new file mode 100644 index 00000000..7668aed5 Binary files /dev/null and b/openshift/applications/images/provide-password.png differ diff --git a/openshift/applications/images/resource-limits-form.png b/openshift/applications/images/resource-limits-form.png new file mode 100644 index 00000000..59b159bb Binary files /dev/null and b/openshift/applications/images/resource-limits-form.png differ diff --git a/openshift/applications/images/resource-limits-popup.png b/openshift/applications/images/resource-limits-popup.png new file mode 100644 index 00000000..092dfc2c Binary files /dev/null and b/openshift/applications/images/resource-limits-popup.png differ diff --git a/openshift/applications/images/rstudio-pod-info.png b/openshift/applications/images/rstudio-pod-info.png new file mode 100644 index 00000000..62f6f12b Binary files /dev/null and b/openshift/applications/images/rstudio-pod-info.png differ diff --git a/openshift/applications/images/rstudio-server-app-url.png b/openshift/applications/images/rstudio-server-app-url.png new file mode 100644 index 00000000..a4bd2c84 Binary files /dev/null and b/openshift/applications/images/rstudio-server-app-url.png differ diff --git a/openshift/applications/images/rstudio-server-user-info.png b/openshift/applications/images/rstudio-server-user-info.png new file mode 100644 index 00000000..6a45c567 Binary files /dev/null and b/openshift/applications/images/rstudio-server-user-info.png differ diff --git a/openshift/applications/images/scale-pod-count.png b/openshift/applications/images/scale-pod-count.png new file mode 100644 index 00000000..8c915af0 Binary files /dev/null and b/openshift/applications/images/scale-pod-count.png differ diff --git a/openshift/applications/images/search-developer-catalog.png b/openshift/applications/images/search-developer-catalog.png new file 
mode 100644 index 00000000..86c74a78 Binary files /dev/null and b/openshift/applications/images/search-developer-catalog.png differ diff --git a/openshift/applications/images/search-mariadb-database.png b/openshift/applications/images/search-mariadb-database.png new file mode 100644 index 00000000..35656a0f Binary files /dev/null and b/openshift/applications/images/search-mariadb-database.png differ diff --git a/openshift/applications/images/select-service-catalog.png b/openshift/applications/images/select-service-catalog.png new file mode 100644 index 00000000..bd80cadc Binary files /dev/null and b/openshift/applications/images/select-service-catalog.png differ diff --git a/openshift/applications/scaling-and-performance-guide/index.html b/openshift/applications/scaling-and-performance-guide/index.html new file mode 100644 index 00000000..b6f5d417 --- /dev/null +++ b/openshift/applications/scaling-and-performance-guide/index.html @@ -0,0 +1,5211 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Scaling and Performance Guide

+

Understanding Pod

+

Pods serve as the smallest unit of compute that can be defined, deployed, and +managed within the OpenShift Container Platform (OCP). The OCP utilizes the +Kubernetes concept of a pod, +which consists of one or more containers deployed together on a single host.

+

Pods are essentially the building blocks of a Kubernetes cluster, analogous to a +machine instance (either physical or virtual) for a container. Each pod is assigned +its own internal IP address, granting it complete ownership over its port space. +Additionally, containers within a pod can share local storage and network resources.

+

The lifecycle of a pod typically involves several stages: first, the pod is defined; +then, it is scheduled to run on a node within the cluster; finally, it runs until +its container(s) exit or until it is removed due to some other circumstance. Depending +on the cluster's policy and the exit code of its containers, pods may be removed +after exiting, or they may be retained to allow access to their container logs.

+

Example pod configurations

+

The following is an example definition of a pod from a Rails application. It +demonstrates many features of pods, most of which are discussed in other topics +and thus only briefly mentioned here:

+

Pod object definition (YAML)

+
    +
  1. +

    Pods can be "tagged" with one or more labels, which can then be used to select + and manage groups of pods in a single operation. The labels are stored in key/value + format in the metadata hash.

    +
  2. +
  3. +

    The pod restart policy with possible values Always, OnFailure, and Never. + The default value is Always. Read this + to learn about "Configuring how pods behave after restart".

    +
  4. +
  5. +

    OpenShift Container Platform defines a security context for containers which + specifies whether they are allowed to run as privileged containers, run as a user + of their choice, and more. The default context is very restrictive but administrators + can modify this as needed.

    +
  6. +
  7. +

    containers specifies an array of one or more container definitions.

    +
  8. +
  9. +

    The container specifies where external storage volumes are mounted within the + container. In this case, there is a volume for storing access to credentials the + registry needs for making requests against the OpenShift Container Platform API.

    +
  10. +
  11. +

    Specify the volumes to provide for the pod. Volumes mount at the specified path. + Do not mount to the container root, /, or any path that is the same in the host + and the container. This can corrupt your host system if the container is sufficiently + privileged, such as the host /dev/pts files. It is safe to mount the host by + using /host.

    +
  12. +
  13. +

    Each container in the pod is instantiated from its own container image.

    +
  14. +
  15. +

    Pods making requests against the OpenShift Container Platform API is a common + enough pattern that there is a serviceAccount field for specifying which service + account user the pod should authenticate as when making the requests. This enables + fine-grained access control for custom infrastructure components.

    +
  16. +
  17. +

    The pod defines storage volumes that are available to its container(s) to use. + In this case, it provides an ephemeral volume for a secret volume containing the + default service account tokens. If you attach persistent volumes that have high + file counts to pods, those pods can fail or can take a long time to start.

    +
  18. +
+
+

Viewing pods

+

You can refer to this user guide on how to view all pods, their usage statistics (i.e. CPU, memory, and storage consumption), and their logs in your project using the OpenShift CLI (oc) commands.
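If you prefer to script these checks rather than run oc commands by hand, here is a minimal sketch using the official kubernetes Python client. It assumes you are already logged in with oc login (so a local kubeconfig exists) and that the kubernetes package is installed; the namespace and pod name below are placeholders:

from kubernetes import client, config

config.load_kube_config()                      # reuses your local oc/kubectl login
namespace = "your-project-namespace"           # placeholder: use your NERC-OCP project

v1 = client.CoreV1Api()

# List the pods in the project and show their current phase.
for pod in v1.list_namespaced_pod(namespace).items:
    print(pod.metadata.name, pod.status.phase)

# Print the logs of one pod (the pod name here is a placeholder).
print(v1.read_namespaced_pod_log(name="rstudio-server", namespace=namespace))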

+
+

Compute Resources

+

Each container running on a node consumes compute resources, which are measurable +quantities that can be requested, allocated, and consumed.

+

When authoring a pod configuration YAML file, you can optionally specify how much +CPU, memory (RAM), and local ephemeral storage each container needs in order to +better schedule pods in the cluster and ensure satisfactory performance as shown +below:

+

Pod Compute Resources (YAML)

+
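The referenced example is not reproduced here; the sketch below (the pod name and image are hypothetical) illustrates how such requests and limits can be declared on a container:

apiVersion: v1
kind: Pod
metadata:
  name: resources-demo                        # hypothetical pod name
spec:
  containers:
    - name: app
      image: image-registry.example.com/example/app:latest   # hypothetical image
      resources:
        requests:                              # minimum guaranteed amounts, used by the scheduler
          cpu: "250m"                          # a quarter of one CPU core
          memory: "128Mi"
          ephemeral-storage: "1Gi"
        limits:                                # maximum amounts the container may consume
          cpu: "500m"
          memory: "256Mi"
          ephemeral-storage: "2Gi"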

CPU and memory can be specified in a couple of ways:

+
    +
  • +

    Resource requests and limits are optional parameters specified at the container + level. OpenShift computes a Pod's request and limit as the sum of requests and + limits across all of its containers. OpenShift then uses these parameters for + scheduling and resource allocation decisions.

    +

    The request value specifies the min value you will be guaranteed. The request +value is also used by the scheduler to assign pods to nodes.

    +

    Pods will get the amount of memory they request. If they exceed their memory request, they could be killed if another pod happens to need this memory. Pods are only ever killed while using less memory than requested if critical system or high-priority workloads need that memory.

    +

    Likewise, each container within a Pod is granted the CPU resources it requests, +subject to availability. Additional CPU cycles may be allocated if resources +are available and not required by other active Pods/Jobs.

    +
    +

    Important Information

    +

    If a Pod's total requests are not available on a single node, then the Pod +will remain in a Pending state (i.e. not running) until these resources +become available.

    +
    +
  • +
  • +

    The limit value specifies the maximum amount you can consume. The limit is the value applications should be tuned to use. Pods are CPU-throttled when they exceed their CPU limit and can be terminated when they exceed their memory limit.

    +
  • +
+

CPU is measured in units called millicores, where 1000 millicores ("m") = 1 vCPU or 1 core. Each node in a cluster inspects the operating system to determine the number of CPU cores on the node, then multiplies that value by 1000 to express its total capacity. For example, if a node has 2 cores, the node's CPU capacity would be represented as 2000m. If you wanted to use 1/10 of a single core, it would be represented as 100m.

+

Memory and ephemeral storage are measured in bytes. In addition, they may be specified with SI suffixes (E, P, T, G, M, K) or their power-of-two equivalents (Ei, Pi, Ti, Gi, Mi, Ki).

+
+

What happens if I did not specify the Compute Resources on Pod YAML?

+

If you don't specify the compute resources for your objects, i.e. containers, then, to restrict them from running with unbounded compute resources on our cluster, the objects will use the limit ranges specified for your project namespace. With limit ranges, we restrict resource consumption for specific objects in a project. You can also view the current limit range for your project by going into the Administrator perspective and then navigating to the "LimitRange details" as shown below:

+

Limit Ranges

+
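If you prefer the command line, you can inspect the same limit ranges with the OpenShift CLI; a small sketch (the project name is a placeholder) is shown below:

oc get limitrange -n <your-project-namespace>        # list the limit ranges in your project
oc describe limitrange -n <your-project-namespace>   # show the default requests/limits they enforce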
+

How to specify pod to use GPU?

+

From a Developer perspective, the only thing you have to worry about is asking for GPU resources when defining your pods, with something like:

+
spec:
  containers:
  - name: app
    image: ...
    resources:
      requests:
        memory: "64Mi"
        cpu: "250m"
        nvidia.com/gpu: 1
      limits:
        memory: "128Mi"
        cpu: "500m"
        nvidia.com/gpu: 1   # GPU requests and limits must be set to the same value
+
+

In the sample Pod Spec above, you can allocate GPUs to pods by specifying the GPU +resource nvidia.com/gpu and indicating the desired number of GPUs. This number +should not exceed the GPU quota specified by the value of the +"OpenShift Request on GPU Quota" attribute that has been approved for your +"NERC-OCP (OpenShift)" resource allocation on NERC's ColdFront as +described here.

+

If you need to increase this quota value, you can request a change as +explained here.

+

The "resources" section under "containers" with the nvidia.com/gpu specification +indicates the number of GPUs you want in this container. Below is an example of +a running pod YAML that requests the GPU device with a count of 2:

+
apiVersion: v1
kind: Pod
metadata:
  name: gpu-pod
spec:
  restartPolicy: Never
  containers:
    - name: cuda-container
      image: nvcr.io/nvidia/k8s/cuda-sample:vectoradd-cuda10.2
      command: ["sleep"]
      args: ["infinity"]
      resources:
        limits:
          nvidia.com/gpu: 2
  nodeSelector:
    nvidia.com/gpu.product: NVIDIA-A100-SXM4-40GB
+
+

In the opened YAML editor, paste the contents of the pod YAML given above, as shown below:

+

YAML Editor GPU Pod

+

After the pod is running, navigate to the pod details and execute the following +command in the Terminal to view the currently available NVIDIA GPU devices:

+

NVIDIA SMI A100 command

+

Additionally, you can execute the following command to narrow down and retrieve +the name of the GPU device:

+
nvidia-smi --query-gpu=gpu_name --format=csv,noheader --id=0 | sed -e 's/ /-/g'
+
NVIDIA-A100-SXM4-40GB
+
+

How to select a different GPU device?

+

We can specify information about the GPU product type, family, count, and so on, as shown in the Pod Spec above. These node labels can also be used in the Pod Spec to schedule workloads based on criteria such as the GPU device name, specified under nodeSelector as shown below:

+
apiVersion: v1
kind: Pod
metadata:
  name: gpu-pod2
spec:
  restartPolicy: Never
  containers:
    - name: cuda-container
      image: nvcr.io/nvidia/k8s/cuda-sample:vectoradd-cuda10.2
      command: ["sleep"]
      args: ["infinity"]
      resources:
        limits:
          nvidia.com/gpu: 1
  nodeSelector:
    nvidia.com/gpu.product: Tesla-V100-PCIE-32GB
+
+
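If your account is permitted to list nodes, you can discover which GPU product labels exist in the cluster before writing a nodeSelector; a small sketch using the same label key as the examples above:

oc get nodes -L nvidia.com/gpu.product    # show nodes along with the value of the nvidia.com/gpu.product label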

When you run the nvidia-smi command in the terminal, you can observe that a different NVIDIA GPU device, the V100, is available, as shown below:

+

NVIDIA SMI V100 command

+

Scaling

+

Scaling defines the number of pods or instances of the application you want to +deploy. Bare pods not managed by a replication controller will not be rescheduled +in the event of a node disruption. You can deploy your application using Deployment +or Deployment Config objects to maintain the desired number of healthy pods and +manage them from the web console. You can create deployment strategies +that help reduce downtime during a change or an upgrade to the application. For +more information about deployment, please read this.

+
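As a minimal sketch of what this looks like (the application name and image are hypothetical), the Deployment below asks OpenShift to keep three replicas of an application running; if a pod or node fails, the missing replicas are recreated automatically:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-app
spec:
  replicas: 3                       # desired number of pods
  selector:
    matchLabels:
      app: example-app
  template:
    metadata:
      labels:
        app: example-app
    spec:
      containers:
        - name: app
          image: image-registry.example.com/example/app:latest   # hypothetical image
          ports:
            - containerPort: 8080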
+

Benefits of Scaling

+

This will allow for a quicker response to peaks in demand, and reduce costs by +automatically scaling down when resources are no longer needed.

+
+

Scaling application pods, resources and observability

+

The Topology view provides the details of the deployed components in the +Overview panel. You can use the Details, Resources and Observe +tabs to scale the application pods, check build status, services, routes, metrics, +and events as follows:

+

Click on the component node to see the Overview panel to the right.

+

Use the Details tab to:

+
    +
  • +

    Scale your pods using the up and down arrows to increase or decrease the number + of pods or instances of the application manually as shown below:

    +

    Scale the Pod Count

    +

    Alternatively, you can easily configure and modify the pod count by right-clicking the application to see the available edit options and selecting Edit Pod Count as shown below:

    +

    Edit the Pod Count

    +
  • +
  • +

    Check the Labels, Annotations, and Status of the application.

    +
  • +
+

Click the Resources tab to:

+
    +
  • +

    See the list of all the pods, view their status, access logs, and click on the + pod to see the pod details.

    +
  • +
  • +

    See the builds, their status, access logs, and start a new build if needed.

    +
  • +
  • +

    See the services and routes used by the component.

    +
  • +
+

Click the Observe tab to:

+
    +
  • +

    See the metrics to see CPU usage, Memory usage and Bandwidth consumption.

    +
  • +
  • +

    See the Events.

    +
    +

    Detailed Monitoring your project and application metrics

    +

    On the left navigation panel of the Developer perspective, click +Observe to see the Dashboard, Metrics, Alerts, and Events for your project. +For more information about Monitoring project and application metrics +using the Developer perspective, please +read this.

    +
    +
  • +
+

Scaling manually

+

To manually scale a DeploymentConfig object, use the oc scale command.

+
oc scale dc <dc_name> --replicas=<replica_count>
+
+

For example, the following command sets the replicas in the frontend DeploymentConfig +object to 3.

+
oc scale dc frontend --replicas=3
+
+

The number of replicas eventually propagates to the desired and current state of +the deployment configured by the DeploymentConfig object frontend.

+
+

Scaling applications based on a schedule (Cron)

+

You can also integrate schedule-based scaling using the OpenShift/Kubernetes native resource called CronJob, which executes a task periodically (date + time) written in Cron format. For example, scaling an app to 5 replicas at 09:00 and then scaling it down to 1 pod at 23:59. To learn more about this, please refer to this blog post.
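A rough sketch of this idea is shown below. It assumes a container image that provides the oc client and a service account (here called scaler) that has been granted permission to scale deployments in the project; a second CronJob with the opposite schedule would scale back down:

apiVersion: batch/v1
kind: CronJob
metadata:
  name: scale-up-frontend               # hypothetical name
spec:
  schedule: "0 9 * * *"                 # every day at 09:00
  jobTemplate:
    spec:
      template:
        spec:
          serviceAccountName: scaler    # assumed service account with rights to scale the deployment
          restartPolicy: OnFailure
          containers:
            - name: scale
              image: image-registry.example.com/tools/oc-client:latest   # any image providing the oc client
              command: ["oc", "scale", "deployment/frontend", "--replicas=5"]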

+
+

AutoScaling

+

We can configure automatic scaling, or autoscaling, for applications to match +incoming demand. This feature automatically adjusts the scale of a replication +controller or deployment configuration based on metrics collected from the pods +belonging to that replication controller or deployment configuration. You can +create a Horizontal Pod Autoscaler (HPA) for any deployment, deployment config, +replica set, replication controller, or stateful set.

+

For instance, if an application receives no traffic, it is scaled down to the +minimum number of replicas configured for the application. Conversely, replicas +can be scaled up to meet demand if traffic to the application increases.

+

Understanding Horizontal Pod Autoscalers (HPA)

+

You can create a horizontal pod autoscaler to specify the minimum and maximum +number of pods you want to run, as well as the CPU utilization or memory utilization +your pods should target.

Metric             | Description
-------------------|----------------------------------------------------------------------------------------------
CPU Utilization    | Number of CPU cores used. Can be used to calculate a percentage of the pod’s requested CPU.
Memory Utilization | Amount of memory used. Can be used to calculate a percentage of the pod’s requested memory.
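For example, an HPA targeting average CPU utilization can also be created from the command line with oc autoscale (the deployment name is a placeholder):

oc autoscale deployment/<your-deployment> --min=1 --max=10 --cpu-percent=75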

After you create a horizontal pod autoscaler, OCP begins to query the CPU and/or memory resource metrics on the pods. When these metrics are available, the HPA computes the ratio of the current metric utilization to the desired metric utilization, and scales up or down accordingly. The query and scaling occur at a regular interval, but it can take one to two minutes before metrics become available.

+

For replication controllers, this scaling corresponds directly to the replicas +of the replication controller. For deployment configurations, scaling corresponds +directly to the replica count of the deployment configuration. Note that autoscaling +applies only to the latest deployment in the Complete phase.

+

For more information on how the HPA works, read this documentation.

+
+

Very Important Note

+

To implement the HPA, all targeted pods must have Resource limits set on their containers. The HPA will not have CPU and Memory metrics until Resource limits are set. The CPU request and limit must be set before CPU utilization can be set, and the Memory request and limit must be set before Memory utilization can be set.

+
+

Resource Limit

+

Resource limits control how much CPU and memory a container will consume on a node. You can specify a limit on how much memory and CPU a container can consume in both request and limit values. You can also specify the minimum request and maximum limit of a given container, as well as the maximum ratio between request and limit. You can easily configure and modify the Resource Limit by right-clicking the application to see the edit options available as shown below:

+

Resource Limits Popup

+

Then select the Edit resource limits link to set the amount of CPU and Memory resources a container is guaranteed or allowed to use when running. In the pod specifications, you must specify the resource requests, such as CPU and memory, as described here.

+

The HPA uses this specification to determine the resource utilization and then +scales the target up or down. Utilization values are calculated as a percentage +of the resource requests of each pod. Missing resource request values can affect +the optimal performance of the HPA.

+

Resource Limits Form

+

Creating a horizontal pod autoscaler by using the web console

+

From the web console, you can create a HPA that specifies the minimum and maximum +number of pods you want to run on a Deployment or DeploymentConfig object. You +can also define the amount of CPU or memory usage that your pods should target. +The HPA increases and decreases the number of replicas between the minimum and +maximum numbers to maintain the specified CPU utilization across all pods.

+
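Behind the scenes, the web console creates a HorizontalPodAutoscaler resource. A minimal sketch of such a resource (the HPA name and target deployment name are placeholders) looks roughly like this:

apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: example-hpa
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: <your-deployment>
  minReplicas: 1
  maxReplicas: 10
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 75      # target 75% of the requested CPU, on average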

To create an HPA in the web console

+
    +
  • +

    In the Topology view, click the node to reveal the side pane.

    +
  • +
  • +

    From the Actions drop-down list, select Add HorizontalPodAutoscaler as + shown below:

    +

    Horizontal Pod Autoscaler Popup

    +
  • +
  • +

    This will open the Add HorizontalPodAutoscaler form as shown below:

    +

    Horizontal Pod Autoscaler Form

    +
    +

    Configure via: Form or YAML View

    +

    While creating or editing the horizontal pod autoscaler in the web console, +you can switch from Form view to YAML view.

    +
    +
  • +
  • +

    From the Add HorizontalPodAutoscaler form, define the name, minimum and maximum + pod limits, the CPU and memory usage, and click Save.

    +
  • +
+

To edit an HPA in the web console

+
    +
  • +

    In the Topology view, click the node to reveal the side pane.

    +
  • +
  • +

    From the Actions drop-down list, select Edit HorizontalPodAutoscaler + to open the Edit Horizontal Pod Autoscaler form.

    +
  • +
  • +

    From the Edit Horizontal Pod Autoscaler form, edit the minimum and maximum + pod limits and the CPU and memory usage, and click Save.

    +
  • +
+

To remove an HPA in the web console

+
    +
  • +

    In the Topology view, click the node to reveal the side panel.

    +
  • +
  • +

    From the Actions drop-down list, select Remove HorizontalPodAutoscaler.

    +
  • +
  • +

    In the confirmation pop-up window, click Remove to remove the HPA.

    +
  • +
+
+

Best Practices

+

Read this document +to learn more about best practices regarding Horizontal Pod Autoscaler (HPA) +autoscaling.

+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift/decommission/decommission-openshift-resources/index.html b/openshift/decommission/decommission-openshift-resources/index.html new file mode 100644 index 00000000..a1ea7f1e --- /dev/null +++ b/openshift/decommission/decommission-openshift-resources/index.html @@ -0,0 +1,5000 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Decommission OpenShift Resources

+

You can decommission all of your NERC OpenShift resources sequentially as outlined +below.

+

Prerequisite

+
    +
  • +

    Backup: Back up any critical data or configurations stored on the resources that are going to be decommissioned. This ensures that important information is not lost during the process.

    +
  • +
  • +

    Kubernetes Objects (Resources): Please review all OpenShift Kubernetes Objects + (Resources) to ensure they are not actively used and ready to be decommissioned.

    +
  • +
  • +

    Install and configure the OpenShift CLI (oc), see How to Setup the + OpenShift CLI Tools + for more information.

    +
  • +
+

Delete all Data Science Project resources from the NERC's Red Hat OpenShift AI

+

Navigate to the NERC's Red Hat OpenShift AI (RHOAI) dashboard from the NERC's +OpenShift Web Console +via the web browser as described here.

+

Once you gain access to the NERC's RHOAI dashboard, you can click on specific Data +Science Project (DSP) corresponding to the appropriate allocation of resources you +want to clean up, as described here.

+

The NERC RHOAI dashboard will look like the one shown below, displaying all consumed +resources:

+

RHOAI Dashboard Before

+

Delete all Workbenches

+

Delete all workbenches by clicking on the three dots on the right side of the +individual workbench and selecting Delete workbench, as shown below:

+

Delete Workbench

+

When prompted please confirm your workbench name and then click "Delete workbench" +button as shown below:

+

Delete Workbench Confirmation

+

Delete all Cluster Storage

+

Delete all cluster storage by clicking on the three dots on the right side of the +individual cluster storage and selecting Delete storage, as shown below:

+

Delete Cluster Storage Confirmation

+

When prompted please confirm your cluster storage name and then click "Delete storage" +button as shown below:

+

Delete Cluster Storage Confirmation

+

Delete all Data connections

+

Delete all data connections by clicking on the three dots on the right side of the +individual data connection and selecting Delete data connection, as shown below:

+

Delete Data Connection

+

When prompted please confirm your data connection name and then click "Delete data +connection" button as shown below:

+

Delete Data Connection Confirmation

+

Delete all Pipelines

+

Delete all pipelines by clicking on the three dots on the right side of the +individual pipeline and selecting Delete pipeline, as shown below:

+

Delete Pipeline

+

When prompted please confirm your pipeline name and then click "Delete pipeline" +button as shown below:

+

Delete Pipeline Confirmation

+

Delete all Models and Model Servers

+

Delete all model servers by clicking on the three dots on the right side of the individual model server and selecting Delete model server, as shown below:

+

Delete Model Server

+

When prompted please confirm your model server name and then click "Delete model +server" button as shown below:

+

Delete Model Server Confirmation

+
+

Important Note

+

Deleting Model Server will automatically delete ALL Models deployed on the +model server.

+
+

Finally, the NERC RHOAI dashboard will look clean and empty without any resources, +as shown below:

+

RHOAI Dashboard After

+

Now, you can return to "OpenShift Web Console" by using the application launcher +icon (the black-and-white icon that looks like a grid), and choosing the "OpenShift +Console" as shown below:

+

The NERC OpenShift Web Console Link

+

Delete all resources from the NERC OpenShift

+

Run oc login in your local machine's terminal using your own token to authenticate +and access all your projects on the NERC OpenShift as +described here. +Please ensure you have already selected the correct project that needs to be +decommissioned, as shown below:

+
oc login --token=<your_token> --server=https://api.shift.nerc.mghpcc.org:6443
Logged into "https://api.shift.nerc.mghpcc.org:6443" as "test1_user@fas.harvard.edu" using the token provided.

You have access to the following projects and can switch between them with 'oc project <projectname>':

    test-project-1
  * test-project-2
    test-project-3

Using project "test-project-2".
+
+

Switch to the project that needs to be decommissioned by running the oc project <projectname> command:

+
oc project <your_openshift_project_to_decommission>
Using project "<your_openshift_project_to_decommission>" on server "https://api.shift.nerc.mghpcc.org:6443".
+
+

Please confirm the correct project is being selected by running oc project, as +shown below:

+
oc project
Using project "<your_openshift_project_to_decommission>" on server "https://api.shift.nerc.mghpcc.org:6443".
+
+
+

Important Note: Best Practice for Specifying Namespace in oc Commands.

+

The best practice is to specify the namespace in each oc command using the +-n option, e.g., -n <your_openshift_project_to_decommission>. This ensures +that your commands are always executed in the intended project, minimizing +the risk of affecting the wrong resources.

+

For example, the oc get all command can also be executed by specifying the +namespace using the -n option, like this: oc get all -n <your_openshift_project_to_decommission>.

+
+

Please review all resources currently being used by your project by running +oc get all, as shown below:

+
oc get all
+
NAME                                                                  READY   STATUS             RESTARTS       AGE
pod/ds-pipeline-persistenceagent-pipelines-definition-868665f7z9lpm   1/1     Running            0              141m
...

NAME                                       TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                               AGE
service/ds-pipeline-pipelines-definition   ClusterIP   172.30.133.168   <none>        8443/TCP,8888/TCP,8887/TCP            141m
...

NAME                                                                 READY   UP-TO-DATE   AVAILABLE   AGE
deployment.apps/ds-pipeline-persistenceagent-pipelines-definition    1/1     1            1           141m
...

NAME                                                                            DESIRED   CURRENT   READY   AGE
replicaset.apps/ds-pipeline-persistenceagent-pipelines-definition-868665f748    1         1         1       141m
...

NAME                                                  IMAGE REPOSITORY                                                                                 TAGS   UPDATED
imagestream.image.openshift.io/simple-node-app-git    image-registry.openshift-image-registry.svc:5000/test-project-gpu-dc1e23/simple-node-app-git

NAME                                                        HOST/PORT                                                                               PATH   SERVICES                           PORT    TERMINATION          WILDCARD
route.route.openshift.io/ds-pipeline-pipelines-definition   ds-pipeline-pipelines-definition-test-project-gpu-dc1e23.apps.shift.nerc.mghpcc.org           ds-pipeline-pipelines-definition   oauth   reencrypt/Redirect   None
...
+
+
+

To list all Resources with their Names only.

+

To list all resources with their names only, you can run this command: +oc get all -oname.

+

Here, -oname flag specifies the output format. In this case, it instructs +the command to output only the names of the resources.

+
+

Run the oc delete command to delete all resource objects specified as +parameters after --all within your selected project (namespace).

+
oc delete pod,deployment,deploymentconfig,pvc,route,service,build,buildconfig,\
statefulset,replicaset,replicationcontroller,job,cronjob,imagestream,revision,\
configuration,notebook --all
+
+
+

Danger

+

The oc delete operation will delete all of the resources specified. This command is very powerful and should be used with caution, as it will delete all such resources in the specified project.

+

Always ensure that you are targeting the correct project (namespace) when using +this command to avoid unintentional deletion of resources. If you're unsure +which namespace you're currently in, run the oc project command to display +the current project. To be safe, you can also specify the namespace in all +oc commands by using the -n option, e.g., -n <your_openshift_project_to_decommission>.

+

Make sure to backup any important data or configurations before executing this +command to prevent accidental data loss.

+
+

Please check all the resources currently being used by your project by running +oc get all, as shown below:

+
oc get all
NAME                        TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                               AGE
service/modelmesh-serving   ClusterIP   None         <none>        8033/TCP,8008/TCP,8443/TCP,2112/TCP   7m4s
+
+
+

Important Note

+

The last remaining service, i.e., service/modelmesh-serving, shown when running +the oc get all command, is a REQUIRED resource, and so you don't need +to clean it up.

+
+

Use ColdFront to reduce the Storage Quota to Zero

+

Each allocation, whether requested or approved, will be billed based on the +pay-as-you-go model. The exception is for Storage quotas, where the cost +is determined by your requested and approved allocation values +to reserve storage from the total NESE storage pool. For NERC-OCP (OpenShift) +Resource Allocations, storage quotas are specified by the "OpenShift Request +on Storage Quota (GiB)" and "OpenShift Limit on Ephemeral Storage Quota (GiB)" +allocation attributes.

+

Even if you have deleted all Persistent Volume Claims (PVCs) in your OpenShift project, it is essential to adjust the approved values for your NERC-OCP (OpenShift) resource allocations to zero (0); otherwise, you will still incur a charge for the approved storage as explained in Billing FAQs.

+

To achieve this, you must submit a final change request to reduce the +Storage Quotas for "OpenShift Request on Storage Quota (GiB)" and "OpenShift +Limit on Ephemeral Storage Quota (GiB)" to zero (0) for your NERC-OCP (OpenShift) +resource type. You can review and manage these resource allocations by visiting +the resource allocations. Here, +you can filter the allocation of your interest and then proceed to request a +change request.

+
+

Very Important Note

+

Although other allocated resources i.e. CPU, RAM, GPU, etc. operate on a +pay-as-you-go model, wherein charges are incurred solely based on usage, +Active (Needs Renewal) allocations after "End Date" will remain accessible +to the users assigned under the allocation. It is advisable to set all other +allocation quota attributes to zero (0) during the change request. This +measure ensures that existing users will not accidentally use the resources +from the project.

+

Alternatively, PIs can control access to the allocation by removing users +assigned to their NERC-OCP (OpenShift) allocation. This ensures that even if +the allocation ends, users will not have access to the unused resources.

+
+

Please make sure your change request looks like this:

+

Change Request to Set All Quotas Zero

+

Wait until the requested resource allocation gets approved by the NERC's admin.

+

After approval, kindly review and verify that the quotas are accurately +reflected in your resource allocation +and OpenShift project. Please ensure +that the approved quota values are accurately displayed as explained here.

+

Review your Project Usage

+

Run the oc get quota command to list the resource quotas defined within your selected project (namespace). Please note the name of the resource quota in the output of this command, i.e., <your_openshift_project_resource_quota_name>.

+
oc get quota
+
NAME                                           AGE    REQUEST                                                                             LIMIT
<your_openshift_project_resource_quota_name>   105s   persistentvolumeclaims: 0/0, requests.nvidia.com/gpu: 0/0, requests.storage: 0/0   limits.cpu: 0/0, limits.ephemeral-storage: 0/0, limits.memory: 0/0
+
+
+

Very Important: Ensure No Resources that will be Billed are Used

+

Most importantly, ensure that there is no active usage for any of your +currently allocated project resources.

+
+

To review the resource quota usage for your project, you can run +oc describe quota <your_openshift_project_resource_quota_name>.

+

Please ensure the output appears as follows, with all Used and Hard resources +having a value of zero (0) as shown below:

+
oc describe quota <your_openshift_project_resource_quota_name>
+
Name:                     <your_openshift_project_resource_quota_name>
Namespace:                <your_openshift_project_to_decommission>
Resource                  Used  Hard
--------                  ----  ----
limits.cpu                0     0
limits.ephemeral-storage  0     0
limits.memory             0     0
persistentvolumeclaims    0     0
requests.nvidia.com/gpu   0     0
requests.storage          0     0
+
+
+

Important Information

+

Make sure to replace <your_openshift_project_resource_quota_name> with the +actual name you find in the output, which is typically in this format: <your_openshift_project_to_decommission>-project.

+
+

Review your Project's Resource Quota from the OpenShift Web Console

+

After removing all OpenShift resources and updating all resource quotas to set +them to zero (0), you can review and verify that these changes are reflected in +your OpenShift Web Console as well.

+

When you are logged in to the NERC's OpenShift Web Console, you will be redirected to the Developer perspective, which is shown selected on the perspective switcher located at the left side. You need to switch to the Administrator perspective to view your Project's Resource Quota as shown below:

+

Perspective Switcher

+

On the left sidebar, navigate to Administration -> ResourceQuotas.

+

Click on your appropriate project name, i.e., <your_openshift_project_to_decommission>, +to view the Resource Quota details.

+

Resource Quota Details

+
+

Very Important Note

+

It should also indicate that all resources have NO usage, i.e., zero (0), +and also NO maximum set, i.e., zero (0), as shown below:

+

Resource Quota Detail Info

+
+

Finally, Archive your ColdFront Project

+

As a PI, you will now be able to Archive your ColdFront Project by accessing NERC's ColdFront interface. Please refer to these instructions on how to archive your projects that need to be decommissioned.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift/decommission/images/change_request_zero.png b/openshift/decommission/images/change_request_zero.png new file mode 100644 index 00000000..25cfef21 Binary files /dev/null and b/openshift/decommission/images/change_request_zero.png differ diff --git a/openshift/decommission/images/cluster-storage-delete-rhoai-confirmation.png b/openshift/decommission/images/cluster-storage-delete-rhoai-confirmation.png new file mode 100644 index 00000000..1d43c988 Binary files /dev/null and b/openshift/decommission/images/cluster-storage-delete-rhoai-confirmation.png differ diff --git a/openshift/decommission/images/cluster-storage-delete-rhoai.png b/openshift/decommission/images/cluster-storage-delete-rhoai.png new file mode 100644 index 00000000..8f84fd7b Binary files /dev/null and b/openshift/decommission/images/cluster-storage-delete-rhoai.png differ diff --git a/openshift/decommission/images/delete-data-connections-rhoai-confirmation.png b/openshift/decommission/images/delete-data-connections-rhoai-confirmation.png new file mode 100644 index 00000000..90f2c972 Binary files /dev/null and b/openshift/decommission/images/delete-data-connections-rhoai-confirmation.png differ diff --git a/openshift/decommission/images/delete-data-connections-rhoai.png b/openshift/decommission/images/delete-data-connections-rhoai.png new file mode 100644 index 00000000..85dcd860 Binary files /dev/null and b/openshift/decommission/images/delete-data-connections-rhoai.png differ diff --git a/openshift/decommission/images/delete-model-server-rhoai-confirmation.png b/openshift/decommission/images/delete-model-server-rhoai-confirmation.png new file mode 100644 index 00000000..9dff8288 Binary files /dev/null and b/openshift/decommission/images/delete-model-server-rhoai-confirmation.png differ diff --git a/openshift/decommission/images/delete-model-server-rhoai.png b/openshift/decommission/images/delete-model-server-rhoai.png new file mode 100644 index 00000000..1c394b48 Binary files /dev/null and b/openshift/decommission/images/delete-model-server-rhoai.png differ diff --git a/openshift/decommission/images/delete-pipelines-rhoai-confirmation.png b/openshift/decommission/images/delete-pipelines-rhoai-confirmation.png new file mode 100644 index 00000000..c4bd4ca2 Binary files /dev/null and b/openshift/decommission/images/delete-pipelines-rhoai-confirmation.png differ diff --git a/openshift/decommission/images/delete-pipelines-rhoai.png b/openshift/decommission/images/delete-pipelines-rhoai.png new file mode 100644 index 00000000..84f80210 Binary files /dev/null and b/openshift/decommission/images/delete-pipelines-rhoai.png differ diff --git a/openshift/decommission/images/delete-workbench-rhoai-confirmation.png b/openshift/decommission/images/delete-workbench-rhoai-confirmation.png new file mode 100644 index 00000000..9729ae12 Binary files /dev/null and b/openshift/decommission/images/delete-workbench-rhoai-confirmation.png differ diff --git a/openshift/decommission/images/delete-workbench-rhoai.png b/openshift/decommission/images/delete-workbench-rhoai.png new file mode 100644 index 00000000..21d7a27e Binary files /dev/null and b/openshift/decommission/images/delete-workbench-rhoai.png differ diff --git a/openshift/decommission/images/perspective-switcher.png b/openshift/decommission/images/perspective-switcher.png new file mode 100644 index 00000000..6dcc1512 Binary files /dev/null and b/openshift/decommission/images/perspective-switcher.png differ diff --git 
a/openshift/decommission/images/resource_quota_detail_info.png b/openshift/decommission/images/resource_quota_detail_info.png new file mode 100644 index 00000000..37f74b8a Binary files /dev/null and b/openshift/decommission/images/resource_quota_detail_info.png differ diff --git a/openshift/decommission/images/resource_quota_details.png b/openshift/decommission/images/resource_quota_details.png new file mode 100644 index 00000000..6468add9 Binary files /dev/null and b/openshift/decommission/images/resource_quota_details.png differ diff --git a/openshift/decommission/images/rhoai-dashboard-after.png b/openshift/decommission/images/rhoai-dashboard-after.png new file mode 100644 index 00000000..3a69c45b Binary files /dev/null and b/openshift/decommission/images/rhoai-dashboard-after.png differ diff --git a/openshift/decommission/images/rhoai-dashboard-before.png b/openshift/decommission/images/rhoai-dashboard-before.png new file mode 100644 index 00000000..b2b74371 Binary files /dev/null and b/openshift/decommission/images/rhoai-dashboard-before.png differ diff --git a/openshift/decommission/images/the-nerc-openshift-web-console-link.png b/openshift/decommission/images/the-nerc-openshift-web-console-link.png new file mode 100644 index 00000000..1debfb1a Binary files /dev/null and b/openshift/decommission/images/the-nerc-openshift-web-console-link.png differ diff --git a/openshift/get-started/openshift-overview/index.html b/openshift/get-started/openshift-overview/index.html new file mode 100644 index 00000000..8920ad27 --- /dev/null +++ b/openshift/get-started/openshift-overview/index.html @@ -0,0 +1,4590 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

OpenShift Overview

+

OpenShift is a multifaceted, container orchestration platform from Red Hat. +OpenShift Container Platform is a cloud-based Kubernetes container platform. +NERC offers a cloud development Platform-as-a-Service (PaaS) solution based +on Red Hat's OpenShift Container Platform that provides isolated, multi-tenant +containers for application development and deployment. This is optimized for +continuous containerized application development and multi-tenant deployment +which allows you and your team to focus on solving your research problems and +not infrastructure management.

+

Basic Components and Glossary of common terms

+

OpenShift is a container orchestration platform that provides a number of components +and tools to help you build, deploy, and manage applications. Here are some of the +basic components of OpenShift:

+
    +
  • +

    Project: A project is a logical grouping of resources in the NERC's OpenShift platform that provides isolation from other resources.

    +
  • +
  • +

    Nodes: Nodes are the physical or virtual machines that run the applications + and services in your OpenShift cluster.

    +
  • +
  • +

    Image: An image is a non-changing definition of file structures and programs for running an application.

    +
  • +
  • +

    Container: A container is an instance of an image with the addition of other + operating system components such as networking and running programs. Containers + are used to run applications and services in OpenShift.

    +
  • +
  • +

    Pods: Pods are the smallest deployable units defined, deployed, and managed + in OpenShift, that group related one or more containers that need to share resources.

    +
  • +
  • +

    Services: Services are logical representations of a set of pods that provide + a network endpoint for access to the application or service. Services can be + used to load balance traffic across multiple pods, and they can be accessed + using a stable DNS name. Services are assigned an IP address and port and proxy + connections to backend pods. This allows the pods to change while the connection + details of the service remain consistent.

    +
  • +
  • +

    Volume: A volume is a persistent file space available to pods and containers + for storing data. Containers are immutable and therefore upon a restart any + contents are cleared and reset to the original state of the image used to create + the container. Volumes provide storage space for files that need to persist + through container restarts.

    +
  • +
  • +

    Routes: Routes can be used to expose services to external clients, allowing connections from outside the platform. A route is assigned a name in DNS when set up to make it easily accessible. They can be configured with custom hostnames and TLS certificates.

    +
  • +
  • +

    Replication Controllers: A replication controller (rc) is a built-in mechanism + that ensures a defined number of pods are running at all times. An asset that + indicates how many pod replicas are required to run at a time. If a pod unexpectedly + quits or is deleted, a new copy of the pod is created and started. Additionally, + if more pods are running than the defined number, the replication controller + will delete the extra pods to get down to the defined number.

    +
  • +
  • +

    Namespace: A Namespace is a way to logically isolate resources within the Cluster. In our case, every project gets a unique namespace.

    +
  • +
  • +

    Role-based access control (RBAC): A key security control to ensure that cluster users and workloads have access only to the resources required to execute their roles.

    +
  • +
  • +

    Deployment Configurations: A deployment configuration (dc) is an extension + of a replication controller that is used to push out a new version of application + code. Deployment configurations are used to define the process of deploying + applications and services to OpenShift. Deployment configurations + can be used to specify the number of replicas, the resources required by the + application, and the deployment strategy to use.

    +
  • +
  • +

    Application URL Components: When an application developer adds an application + to a project, a unique DNS name is created for the application via a Route. All + application DNS names will have a hyphen separator between your application name + and your unique project namespace. If the application is a web application, this + DNS name is also used for the URL to access the application. All names are in + the form of <appname>-<mynamespace>.apps.shift.nerc.mghpcc.org. + For example: mytestapp-mynamespace.apps.shift.nerc.mghpcc.org.

    +
  • +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift/index.html b/openshift/index.html new file mode 100644 index 00000000..482132f0 --- /dev/null +++ b/openshift/index.html @@ -0,0 +1,4675 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

OpenShift Tutorial Index

+

If you're just starting out, we recommend starting from OpenShift Overview +and going through the tutorial in order.

+

If you just need to review a specific step, you can find the page you need in +the list below.

+

OpenShift Getting Started

+ +

OpenShift Web Console

+ +

OpenShift command-line interface (CLI) Tools

+ +

Creating Your First Application on OpenShift

+ +

Editing Applications

+ +

Storage

+ +

Deleting Applications

+ +

Decommission OpenShift Resources

+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift/logging-in/access-the-openshift-web-console/index.html b/openshift/logging-in/access-the-openshift-web-console/index.html new file mode 100644 index 00000000..192b900a --- /dev/null +++ b/openshift/logging-in/access-the-openshift-web-console/index.html @@ -0,0 +1,4505 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Access the NERC's OpenShift Web Console

+

The NERC's OpenShift Container Platform web console is a user interface that +can be accessed via the web.

+

You can find it at https://console.apps.shift.nerc.mghpcc.org.

+

The NERC Authentication supports CILogon using Keycloak for gateway authentication and authorization, which provides federated login via your institution accounts and is the recommended authentication method.

+

Make sure you are selecting "mss-keycloak" as shown here:

+

OpenShift Login with KeyCloak

+

Next, you will be redirected to CILogon welcome page as shown below:

+

CILogon Welcome Page

+

MGHPCC Shared Services (MSS) Keycloak will request approval of access to the +following information from the user:

+
    +
  • +

    Your CILogon user identifier

    +
  • +
  • +

    Your name

    +
  • +
  • +

    Your email address

    +
  • +
  • +

    Your username and affiliation from your identity provider

    +
  • +
+

which are required in order to allow access to your account on NERC's OpenShift web console.

+

From the "Selected Identity Provider" dropdown option, please select your institution's +name. If you would like to remember your selected institution name for future +logins please check the "Remember this selection" checkbox this will bypass the +CILogon welcome page on subsequent visits and proceed directly to the selected insitution's +identity provider(IdP). Click "Log On". This will redirect to your respective institutional +login page where you need to enter your institutional credentials.

+
+

Important Note

+

The NERC does not see or have access to your institutional account credentials; it points to your selected institution's identity provider and redirects back once authenticated.

+
+

Once you successfully authenticate you should see a graphical user interface to +visualize your project data and perform administrative, management, and troubleshooting +tasks.

+

OpenShift Web Console

+
+

I can't find my project

+

If you are a member of several projects i.e. ColdFront NERC-OCP (OpenShift) +allocations, you may need to switch the project before you can see and use +OpenShift resources you or your team has created. Clicking on the project dropdown +which is displayed near the top left side will popup the list of projects you +are in. You can search and select the new project by hovering and clicking +on the project name in that list as shown below:

+

OpenShift Project List

+
+
+

Important Note

+

The default view for the OpenShift Container Platform web console is the Developer +perspective.

+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift/logging-in/images/CILogon_interface.png b/openshift/logging-in/images/CILogon_interface.png new file mode 100644 index 00000000..fd1c073f Binary files /dev/null and b/openshift/logging-in/images/CILogon_interface.png differ diff --git a/openshift/logging-in/images/CLI-login-tools.png b/openshift/logging-in/images/CLI-login-tools.png new file mode 100644 index 00000000..70438c32 Binary files /dev/null and b/openshift/logging-in/images/CLI-login-tools.png differ diff --git a/openshift/logging-in/images/copy-oc-cli-login-command.png b/openshift/logging-in/images/copy-oc-cli-login-command.png new file mode 100644 index 00000000..b1d7b8a6 Binary files /dev/null and b/openshift/logging-in/images/copy-oc-cli-login-command.png differ diff --git a/openshift/logging-in/images/display-token.png b/openshift/logging-in/images/display-token.png new file mode 100644 index 00000000..6a7a5b37 Binary files /dev/null and b/openshift/logging-in/images/display-token.png differ diff --git a/openshift/logging-in/images/nerc_openshift_web_console.png b/openshift/logging-in/images/nerc_openshift_web_console.png new file mode 100644 index 00000000..498823e5 Binary files /dev/null and b/openshift/logging-in/images/nerc_openshift_web_console.png differ diff --git a/openshift/logging-in/images/oc-login-command.png b/openshift/logging-in/images/oc-login-command.png new file mode 100644 index 00000000..49e1a7dc Binary files /dev/null and b/openshift/logging-in/images/oc-login-command.png differ diff --git a/openshift/logging-in/images/openshift-web-console.png b/openshift/logging-in/images/openshift-web-console.png new file mode 100644 index 00000000..3a35b98a Binary files /dev/null and b/openshift/logging-in/images/openshift-web-console.png differ diff --git a/openshift/logging-in/images/openshift_login.png b/openshift/logging-in/images/openshift_login.png new file mode 100644 index 00000000..6e69a3cf Binary files /dev/null and b/openshift/logging-in/images/openshift_login.png differ diff --git a/openshift/logging-in/images/openshift_project_list.png b/openshift/logging-in/images/openshift_project_list.png new file mode 100644 index 00000000..b2d42ee5 Binary files /dev/null and b/openshift/logging-in/images/openshift_project_list.png differ diff --git a/openshift/logging-in/images/perspective-switcher.png b/openshift/logging-in/images/perspective-switcher.png new file mode 100644 index 00000000..6f7956e9 Binary files /dev/null and b/openshift/logging-in/images/perspective-switcher.png differ diff --git a/openshift/logging-in/images/project-list.png b/openshift/logging-in/images/project-list.png new file mode 100644 index 00000000..9c9c8f54 Binary files /dev/null and b/openshift/logging-in/images/project-list.png differ diff --git a/openshift/logging-in/setup-the-openshift-cli/index.html b/openshift/logging-in/setup-the-openshift-cli/index.html new file mode 100644 index 00000000..4fa8797f --- /dev/null +++ b/openshift/logging-in/setup-the-openshift-cli/index.html @@ -0,0 +1,4604 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

How to Setup the OpenShift CLI Tools

+

The most commonly used command-line client tool for the NERC's OpenShift is +OpenShift CLI (oc). +It is available for Linux, Windows, or macOS and allows you to create +applications and manage OpenShift Container Platform projects from a terminal.

+

Installing the OpenShift CLI

+

Installation options for the CLI vary depending on your Operating System (OS). You can install the OpenShift CLI (oc) either by downloading the binary or by using an RPM package manager.

+

Unlike the web console, it allows the user to work directly with the project +source code using command scripts once they are authenticated using token.

+

You can download the latest oc CLI client tool binary from web console as shown +below:

+

oc - OpenShift Command Line Interface (CLI) Binary Download

+

Then add it to your path environment based on your OS choice by following this documentation.

+

Configuring the OpenShift CLI

+

You can configure the oc command tool to enable tab completion to automatically +complete oc commands or suggest options when you press Tab for the Bash or Zsh +shells by following these steps.
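As a quick sketch for the Bash shell (the file paths are just examples), tab completion can be enabled like this:

oc completion bash > ~/.oc_completion.bash          # generate the completion script
echo 'source ~/.oc_completion.bash' >> ~/.bashrc    # load it in new shell sessions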

+

First Time Usage

+

Before you can use the oc command-line tool, you will need to authenticate to the NERC's OpenShift platform by running the built-in login command obtained from the NERC's OpenShift Web Console. This will allow authentication and enable you to work with your NERC's OpenShift Container Platform projects. It will create a session that will last approximately 24 hours.

+

To get the oc login command with your own unique token, please login to the NERC's +OpenShift Web Console and then under your user profile link located at the top right +corner, click on Copy login command as shown below:

+

Copy oc CLI Login Command

+

It will once again ask you to provide your KeyCloak login and then once successful +it will redirect you to a static page with a link to Display Token as shown below:

+

Display Token

+

Clicking on the "Display Token" link will show a static page with the login command and token, as shown below:

+

oc Login Command with Token

+

Copy the generated command and run it in your terminal to authenticate yourself and access the project, i.e. oc login --token=<Your-Token> --server=https://<NERC-OpenShift-Server>
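After logging in, you can verify that the session works and see which project is currently selected:

oc whoami          # prints the username you are logged in as
oc project         # prints the currently selected project
oc get projects    # lists all projects you can access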

+

If you try to run an oc command and get a permission denied message, your login +session has likely expired and you will need to re-generate the oc login command +from your NERC's OpenShift Web Console and then run the new oc login command with +new token on your terminal.

+

Other Useful oc Commands

+

This reference document +provides descriptions and example commands for OpenShift CLI (oc) developer commands.

+
+

Important Note

+

Run oc help to list all commands or run oc <command> --help to get additional +details for a specific command.

+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift/logging-in/the-openshift-cli/index.html b/openshift/logging-in/the-openshift-cli/index.html new file mode 100644 index 00000000..c650b460 --- /dev/null +++ b/openshift/logging-in/the-openshift-cli/index.html @@ -0,0 +1,4466 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

OpenShift command-line interface (CLI) Tools Overview

+

With the OpenShift CLI, the oc command, you can create applications and manage +OpenShift Container Platform projects from a terminal.

+

The web console provides a comprehensive set of tools for managing your projects +and applications. There are, however, some tasks that can only be performed using +a command-line tool called oc.

+

The OpenShift CLI is ideal in the following situations:

+
    +
  • +

    Working directly with project source code

    +
  • +
  • +

    Scripting OpenShift Container Platform operations

    +
  • +
  • +

    Managing projects while restricted by bandwidth resources and the web console + is unavailable

    +
  • +
+

It is recommended that developers be comfortable with simple command-line tasks and with the NERC's OpenShift command-line tool.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift/logging-in/web-console-overview/index.html b/openshift/logging-in/web-console-overview/index.html new file mode 100644 index 00000000..6b1b1f72 --- /dev/null +++ b/openshift/logging-in/web-console-overview/index.html @@ -0,0 +1,4852 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Web Console Overview

+

The NERC's OpenShift Container Platform (OCP) has a web-based console that can be +used to perform common management tasks such as building and deploying applications.

+

You can find it at https://console.apps.shift.nerc.mghpcc.org.

+

The web console provides tools to access and manage your application code and data.

+

Below is a sample screenshot of the web interface with labels describing different +sections of the NERC's OpenShift Web Console:

+

NERC's OpenShift Web Console Screenshot

+
    +
  1. +

    Perspective Switcher - Drop-down to select a different perspective. The available + perspectives are a Developer view and an Administrator view.

    +
  2. +
  3. +

    Project List - Drop-down to select a different project. Based on user's active + and approved resource allocations this projects list will be updated.

    +
  4. +
  5. +

    Navigation Menu - Menu options to access different tools and settings for a project. + The list will change depending on which Perspective view you are in.

    +
  6. +
  7. +

    User Preferences - Shows the option to get and copy the OpenShift Command Line oc login command and to set your individual console preferences, including default views, language, import settings, and more.

    +
  8. +
  9. +

    View Switcher - This three dot menu is used to switch between List View + and Graph view of all your applications.

    +
  10. +
  11. +

    Main Panel - Displays basic application information. Clicking on the application + names in the main panel expands the Details Panel (7).

    +
  12. +
  13. +

    Details Panel - Displays additional information about the application selected + from the Main Panel. This includes detailed information about the running application, + applications builds, routes, and more. Tabs at the top of this panel will change + the view to show additional information such as Details and Resources.

    +
  14. +
+
+

Perspective Switcher

+

When you are logged-in, you will be redirected to the Developer perspective +which is shown selected on the perspective switcher located at the Left side. You +can switch between the Administrator perspective and the Developer perspective +as per your roles and permissions in a project.

+

Perspective Switcher

+

About the Administrator perspective in the web console

+

The Administrator perspective enables you to view the cluster inventory, capacity, +general and specific utilization information, and the stream of important events, +all of which help you to simplify planning and troubleshooting tasks. Both project +administrators and cluster administrators can view the Administrator perspective.

+
+

Important Note

+
+

The default web console perspective that is shown depends on the role of the +user. The Administrator perspective is displayed by default if the user is +recognized as an administrator.

+

About the Developer perspective in the web console

+

The Developer perspective offers several built-in ways to deploy applications, +services, and databases.

+
+

Important Note

+
+

The default view for the OpenShift Container Platform web console is the Developer +perspective.

+

The web console provides a comprehensive set of tools for managing your projects +and applications.

+

Project List

+

You can select or switch your projects from the available project drop-down list +located on top navigation as shown below:

+

Project List

+
+

Important Note

+
+

You can identify the currently selected project with tick mark and also +you can click on star icon to keep the project under your Favorites list.

+ +

Topology

+

The Topology view in the Developer perspective of the web console provides a
visual representation of all the applications within a project, their build status,
and the components and services associated with them. If you have no workloads or
applications in the project, the Topology view displays the available options to
create applications. If you have existing workloads, the Topology view graphically
displays your workload nodes. To read more about how to view the topology of
your application, please read this official documentation from Red Hat.

+

Observe

+

This provides you with a Dashboard to view the resource usage as well as other
metrics and events that occurred in your project. Here you can identify, monitor,
and inspect the usage of Memory, CPU, Network, and Storage in your project.

+ +

This allows you to search for any resource based on criteria such as Label or Name.

+

Builds

+

This menu provides tools for building and deploying applications. You can use it +to create and manage build configurations using YAML syntax, as well as view the +status and logs of your builds.
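If you prefer the command line, the same build operations can also be driven with
the oc client; a minimal sketch, assuming you are already logged in to the NERC
cluster and a BuildConfig named my-app (a hypothetical name) exists in your project:

# List the BuildConfigs in the currently selected project
oc get buildconfigs

# Start a new build from the "my-app" BuildConfig and stream its logs
oc start-build my-app --follow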

+

Helm

+

You can enable Helm Charts here. Helm is the package manager that helps you easily
manage definitions, installations, and upgrades of your complex applications. It
also shows a catalog of all the available Helm charts that you can install and use.
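Outside the web console, the same charts can also be managed with the helm CLI; a
rough sketch, where the repository URL, chart, and release names are illustrative
examples rather than NERC-specific values:

# Register a chart repository and refresh the local chart index
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update

# Install a chart into the currently selected project/namespace
helm install my-release bitnami/nginx

# List the releases installed in this project
helm list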

+

Project

+

This allows you to view an overview of the currently selected project from the
drop-down list, along with details about it, including resource utilization and
resource quotas.

+

ConfigMaps

+

This menu allows you to view or create a new ConfigMap by manually entering YAML
or JSON definitions, or by dragging and dropping a file into the editor.
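As a rough illustration, a ConfigMap can also be created from a YAML definition
with the oc client; the name and keys below are hypothetical:

oc create -f - <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config         # hypothetical ConfigMap name
data:
  APP_MODE: "production"   # example non-sensitive configuration values
  LOG_LEVEL: "info"
EOF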

+

Secrets

+

This allows you to view or create Secrets, which let you inject sensitive data
into your application as files or environment variables.
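For illustration only, a Secret can also be created and injected from the command
line; the names and values below are placeholders:

# Create a generic Secret from literal key/value pairs
oc create secret generic db-credentials \
  --from-literal=DB_USER=myuser \
  --from-literal=DB_PASSWORD=changeme

# Expose the Secret to an existing deployment as environment variables
oc set env deployment/my-app --from=secret/db-credentials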

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openshift/storage/storage-overview/index.html b/openshift/storage/storage-overview/index.html new file mode 100644 index 00000000..33cfe11e --- /dev/null +++ b/openshift/storage/storage-overview/index.html @@ -0,0 +1,4746 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Storage Overview

+

The NERC OCP supports multiple types of storage.

+

Glossary of common terms for OCP storage

+

This glossary defines common terms that are used in the storage content.

+

Storage

+

OCP supports many types of storage, for both on-premises and cloud providers. You
can manage container storage for persistent and non-persistent data in an OCP cluster.

+

Storage class

+

A storage class provides a way for administrators to describe the classes of storage
they offer. Different classes might map to quality-of-service levels, backup policies,
or arbitrary policies determined by the cluster administrators.

+

Storage types

+

OCP storage is broadly classified into two categories, namely ephemeral storage +and persistent storage.

+

Ephemeral storage

+

Pods and containers are ephemeral or transient in nature and designed for stateless +applications. Ephemeral storage allows administrators and developers to better manage +the local storage for some of their operations. For more information about ephemeral +storage overview, types, and management, see Understanding ephemeral storage.

+

Pods and containers can require temporary or transient local storage for their +operation. The lifetime of this ephemeral storage does not extend beyond the life +of the individual pod, and this ephemeral storage cannot be shared across pods.

+

Persistent storage

+

Stateful applications deployed in containers require persistent storage. OCP uses +a pre-provisioned storage framework called persistent volumes (PV) to allow cluster +administrators to provision persistent storage. The data inside these volumes can +exist beyond the lifecycle of an individual pod. Developers can use persistent +volume claims (PVCs) to request storage requirements. For more information about +persistent storage overview, configuration, and lifecycle, see Understanding +persistent storage.

+

Pods and containers can require permanent storage for their operation. OpenShift +Container Platform uses the Kubernetes persistent volume (PV) framework to allow +cluster administrators to provision persistent storage for a cluster. Developers +can use PVC to request PV resources without having specific knowledge of the +underlying storage infrastructure.

+

Persistent volumes (PV)

+

OCP uses the Kubernetes persistent volume (PV) framework to allow cluster +administrators to provision persistent storage for a cluster. Developers can use +PVC to request PV resources without having specific knowledge of the underlying +storage infrastructure.

+

Persistent volume claims (PVCs)

+

You can use a PVC to mount a PersistentVolume into a Pod. You can access the +storage without knowing the details of the cloud environment.

+
+

Important Note

+

A PVC is in active use by a pod when a Pod object exists that uses the PVC.

+
+

Access modes

+

Volume access modes describe volume capabilities. You can use access modes to match
a persistent volume claim (PVC) with a persistent volume (PV). The following are
examples of access modes:

+ + + + + + + + + + + + + + + + + + + + + + + + + +
Access Mode | Description
ReadWriteOnce (RWO) | Allows read-write access to the volume by a single node at a time.
ReadOnlyMany (ROX) | Allows the volume to be mounted read-only by many nodes simultaneously.
ReadWriteMany (RWX) | Allows multiple nodes to read from and write to the volume simultaneously.
ReadWriteOncePod (RWOP) | Allows read-write access to the volume by only a single pod across the whole cluster.
+
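To tie these terms together, here is a minimal sketch of a PVC that requests 5 GiB
of storage with the ReadWriteOnce access mode; the claim name is hypothetical, and
because no storageClassName is set, the cluster's default storage class would be used:

oc create -f - <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-volume        # hypothetical claim name
spec:
  accessModes:
    - ReadWriteOnce        # one of the access modes described above
  resources:
    requests:
      storage: 5Gi         # amount of storage requested
EOF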
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/access-and-security/create-a-key-pair/index.html b/openstack/access-and-security/create-a-key-pair/index.html new file mode 100644 index 00000000..01e87dbf --- /dev/null +++ b/openstack/access-and-security/create-a-key-pair/index.html @@ -0,0 +1,4845 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Create a Key-pair

+
+

NOTE

+

If you will be using PuTTY on Windows, please read this first.

+
+

Add a Key Pair

+

For security, the VM images have password authentication disabled by default, +so you will need to use an SSH key pair to log in.

+

You can view key pairs by clicking Project, then clicking the Compute panel and choosing
Key Pairs from the tabs that appear. This shows the key pairs that are
available for this project.

+

Key Pairs

+

Generate a Key Pair

+
+

Prerequisite

+

You need ssh installed in your system.

+
+

You can create a key pair on your local machine, then upload the public key to +the cloud. This is the recommended method.

+

Open a terminal and type the following commands (in this example, we have named +the key cloud.key, but you can name it anything you want):

+
cd ~/.ssh
+ssh-keygen -t rsa -f ~/.ssh/cloud.key -C "label_your_key"
+
+

Example:

+

Generate Key Pair

+

You will be prompted to create a passphrase for the key. +IMPORTANT: Do not forget the passphrase! If you do, you will be unable to use +your key.

+

This process creates two files in your .ssh folder:

+
cloud.key      # private key - don’t share this with anyone, and never upload
+# it anywhere ever
+cloud.key.pub  # this is your public key
+
+
+

Pro Tip

+

The -C "label" field is not required, but it is useful to quickly identify +different public keys later.

+

You could use your email address as the label, or a user@host tag that +identifies the computer the key is for.

+

For example, if Bob has both a laptop and a desktop computer that he will use,
he might use -C "Bob@laptop" to label the key he generates on the laptop,
and -C "Bob@desktop" for the desktop.

+
+

On your terminal:

+
pbcopy < ~/.ssh/cloud.key.pub  #copies the contents of public key to your clipboard
+
+
+

Pro Tip

+

If pbcopy isn't working, you can locate the hidden .ssh folder, open the +file in your favorite text editor, and copy it to your clipboard.

+
+
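For example, you could simply print the key and copy it by hand, or use a clipboard
utility such as xclip if it is installed on your Linux system (pbcopy is macOS-specific):

# Print the public key so you can select and copy it manually
cat ~/.ssh/cloud.key.pub

# On Linux, xclip (if installed) can copy it straight to the clipboard
xclip -selection clipboard < ~/.ssh/cloud.key.pub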

Import the generated Key Pair

+

Now that you have created your keypair in ~/.ssh/cloud.key.pub, you can upload +it to OpenStack by either using Horizon dashboard or +OpenStack CLI as +described below:

+

1. Using NERC's Horizon dashboard

+

Go back to the Openstack Dashboard, where you should still be on the Key Pairs tab

+

(If not, find it under Project -> Compute -> Key Pairs)

+

Choose "Import Public Key". Give the key a name in the "Key Pair Name" Box, +choose "SSH Key" as the Key Type dropdown option and paste the public key that +you just copied in the "Public Key" text box.

+

Import Key Pair

+

Click "Import Public Key". You will see your key pair appear in the list.

+

New Key Pair

+

You can now skip ahead to Adding the key to an ssh-agent.

+

2. Using the OpenStack CLI

+

Prerequisites:

+

To run the OpenStack CLI commands, you need to have:

+ +

To create OpenStack keypair using the CLI, do this:

+

Using the openstack client commands

+

Now that you have created your keypair in ~/.ssh/cloud.key.pub, you can upload +it to OpenStack with name "my-key" as follows:

+
openstack keypair create --public-key ~/.ssh/cloud.key.pub my-key
++-------------+-------------------------------------------------+
+| Field       | Value                                           |
++-------------+-------------------------------------------------+
+| created_at  | None                                            |
+| fingerprint | 1c:40:db:ea:82:c2:c3:05:58:81:84:4b:e3:4f:c2:a1 |
+| id          | my-key                                          |
+| is_deleted  | None                                            |
+| name        | my-key                                          |
+| type        | ssh                                             |
+| user_id     | 938eb8bfc72e4ca3ad2c94e2eb4059f7                |
++-------------+-------------------------------------------------+
+
+
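You can then confirm that the key pair was uploaded, for example:

# List all key pairs registered for your user
openstack keypair list

# Show the details of the key pair created above
openstack keypair show my-key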

Create a Key Pair using Horizon dashboard

+

Alternatively, if you are having trouble creating and importing a key pair with +the instructions above, the Openstack Horizon dashboard can make one for you.

+

Click "Create a Key Pair", and enter a name for the key pair.

+

Create Key Pair

+

Click on "Create a Key Pair" button. You will be prompted to download a .pem +file containing your private key.

+

In the example, we have named the key 'cloud.pem', but you can name it anything.

+

Save this file to your hard drive, for example in your Downloads folder.

+

Copy this key inside the .ssh folder on your local machine/laptop, using the +following steps:

+
cd ~/Downloads          # Navigate to the folder where you saved the .pem file
+mv cloud.pem ~/.ssh/    # This command moves the key you downloaded into
+# your .ssh folder.
+cd ~/.ssh               # Navigate to your .ssh folder
+chmod 400 cloud.pem     # Change the permissions of the file
+
+

To see your public key, navigate to Project -> Compute -> Key Pairs

+

You should see your key in the list.

+

Key Pairs List

+

If you click on the name of the newly added key, you will see a screen of +information that includes details about your public key:

+

View Key Pair Detail

+

The public key is the part of the key you distribute to VMs and remote servers.

+

You may find it convenient to paste it into a file inside your .ssh folder, +so you don't always need to log into the website to see it.

+

Call the file something like cloud_key.pub to distinguish it from your +private key.

+
+

Very Important: Security Best Practice

+

Never share your private key with anyone, or upload it to a server!

+
+

Adding your SSH key to the ssh-agent

+

If you have many VMs, you will most likely be using one or two VMs with public +IPs as a gateway to others which are not reachable from the internet.

+

In order to be able to use your key for multiple SSH hops, do NOT copy your +private key to the gateway VM!

+

The correct method is to use Agent Forwarding, which adds the key to an ssh-agent
on your local machine and 'forwards' it over the SSH connection.

+

If ssh-agent is not already running, you need to start it in the background.

+
eval "$(ssh-agent -s)"
+> Agent pid 59566
+
+

Then, add the key to your ssh agent:

+
cd ~/.ssh
+ssh-add cloud.key
+Identity added: cloud.key (test_user@laptop)
+
+

Check that it is added with the command

+
ssh-add -l
+2048 SHA256:D0DLuODzs15j2OaZnA8I52aEeY3exRT2PCsUyAXgI24 test_user@laptop (RSA)
+
+

Depending on your system, you might have to repeat these steps after you reboot +or log out of your computer.

+

You can always check if your ssh key is added by running the ssh-add -l command.

+

A key with the default name id_rsa will be added by default at login, although +you will still need to unlock it with your passphrase the first time you use it.

+

Once the key is added, you will be able to forward it over an SSH connection, +like this:

+
ssh -A -i cloud.key <username>@<remote-host-IP>
+
+
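If you connect to the same gateway VM often, you can record these settings in your
SSH client configuration instead of typing the flags every time; a minimal sketch of
a ~/.ssh/config entry, where the host alias, IP, and user are placeholders to replace
with your own values:

# ~/.ssh/config
Host gateway-vm                # alias you choose for the VM
    HostName 199.94.60.24      # the VM's Floating IP
    User ubuntu                # login user for your image
    IdentityFile ~/.ssh/cloud.key
    ForwardAgent yes           # equivalent of the -A flag shown above

With this in place, running ssh gateway-vm behaves like the command above.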

Connecting via SSH is discussed in more detail later in the tutorial (SSH to +Cloud VM); for now, just +proceed to the next step below.

+

SSH keys with PuTTY on Windows

+

PuTTY requires SSH keys to be in its own ppk format. To convert between +OpenSSH keys used by OpenStack and PuTTY's format, you need a utility called PuTTYgen.

+

If it was not installed when you originally installed PuTTY, you can get it +here: Download PuTTY.

+

You have 2 options for generating keys that will work with PuTTY:

+
    +
  1. +

    Generate an OpenSSH key with ssh-keygen or from the Horizon GUI using the + instructions above, then use PuTTYgen to convert the private key to .ppk

    +
  2. +
  3. +

    Generate a .ppk key with PuTTYgen, and import the provided OpenSSH public + key to OpenStack using the 'Import the generated Key Pair' instructions + above.

    +
  4. +
+

There is a detailed walkthrough of how to use PuTTYgen here: Use SSH Keys with +PuTTY on Windows.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/access-and-security/images/added_rdp_security_rule.png b/openstack/access-and-security/images/added_rdp_security_rule.png new file mode 100644 index 00000000..ca8b9479 Binary files /dev/null and b/openstack/access-and-security/images/added_rdp_security_rule.png differ diff --git a/openstack/access-and-security/images/added_ssh_security_rule.png b/openstack/access-and-security/images/added_ssh_security_rule.png new file mode 100644 index 00000000..e7a0ad90 Binary files /dev/null and b/openstack/access-and-security/images/added_ssh_security_rule.png differ diff --git a/openstack/access-and-security/images/adding_new_security_groups.png b/openstack/access-and-security/images/adding_new_security_groups.png new file mode 100644 index 00000000..41105765 Binary files /dev/null and b/openstack/access-and-security/images/adding_new_security_groups.png differ diff --git a/openstack/access-and-security/images/create_key.png b/openstack/access-and-security/images/create_key.png new file mode 100644 index 00000000..a856cf15 Binary files /dev/null and b/openstack/access-and-security/images/create_key.png differ diff --git a/openstack/access-and-security/images/create_rdp_security_group.png b/openstack/access-and-security/images/create_rdp_security_group.png new file mode 100644 index 00000000..866f612d Binary files /dev/null and b/openstack/access-and-security/images/create_rdp_security_group.png differ diff --git a/openstack/access-and-security/images/create_security_group.png b/openstack/access-and-security/images/create_security_group.png new file mode 100644 index 00000000..b25b8c7c Binary files /dev/null and b/openstack/access-and-security/images/create_security_group.png differ diff --git a/openstack/access-and-security/images/default_security_group_rules.png b/openstack/access-and-security/images/default_security_group_rules.png new file mode 100644 index 00000000..cfb62096 Binary files /dev/null and b/openstack/access-and-security/images/default_security_group_rules.png differ diff --git a/openstack/access-and-security/images/edit_security_group.png b/openstack/access-and-security/images/edit_security_group.png new file mode 100644 index 00000000..9405361a Binary files /dev/null and b/openstack/access-and-security/images/edit_security_group.png differ diff --git a/openstack/access-and-security/images/generate_key.png b/openstack/access-and-security/images/generate_key.png new file mode 100644 index 00000000..36675b33 Binary files /dev/null and b/openstack/access-and-security/images/generate_key.png differ diff --git a/openstack/access-and-security/images/import-key-pair.png b/openstack/access-and-security/images/import-key-pair.png new file mode 100644 index 00000000..6c71ca15 Binary files /dev/null and b/openstack/access-and-security/images/import-key-pair.png differ diff --git a/openstack/access-and-security/images/key-pairs.png b/openstack/access-and-security/images/key-pairs.png new file mode 100644 index 00000000..0943c922 Binary files /dev/null and b/openstack/access-and-security/images/key-pairs.png differ diff --git a/openstack/access-and-security/images/key_pairs_list.png b/openstack/access-and-security/images/key_pairs_list.png new file mode 100644 index 00000000..623630b0 Binary files /dev/null and b/openstack/access-and-security/images/key_pairs_list.png differ diff --git a/openstack/access-and-security/images/new_key_pair.png b/openstack/access-and-security/images/new_key_pair.png new file mode 100644 index 
00000000..45c126c6 Binary files /dev/null and b/openstack/access-and-security/images/new_key_pair.png differ diff --git a/openstack/access-and-security/images/ping_icmp_security_rule.png b/openstack/access-and-security/images/ping_icmp_security_rule.png new file mode 100644 index 00000000..cca96a57 Binary files /dev/null and b/openstack/access-and-security/images/ping_icmp_security_rule.png differ diff --git a/openstack/access-and-security/images/rdp_security_group_rules_options.png b/openstack/access-and-security/images/rdp_security_group_rules_options.png new file mode 100644 index 00000000..25f5f5ab Binary files /dev/null and b/openstack/access-and-security/images/rdp_security_group_rules_options.png differ diff --git a/openstack/access-and-security/images/security_group_add_rule.png b/openstack/access-and-security/images/security_group_add_rule.png new file mode 100644 index 00000000..3bdc2e1d Binary files /dev/null and b/openstack/access-and-security/images/security_group_add_rule.png differ diff --git a/openstack/access-and-security/images/security_group_rules.png b/openstack/access-and-security/images/security_group_rules.png new file mode 100644 index 00000000..b8e26689 Binary files /dev/null and b/openstack/access-and-security/images/security_group_rules.png differ diff --git a/openstack/access-and-security/images/security_group_rules_options.png b/openstack/access-and-security/images/security_group_rules_options.png new file mode 100644 index 00000000..80522adb Binary files /dev/null and b/openstack/access-and-security/images/security_group_rules_options.png differ diff --git a/openstack/access-and-security/images/security_groups.png b/openstack/access-and-security/images/security_groups.png new file mode 100644 index 00000000..741d48df Binary files /dev/null and b/openstack/access-and-security/images/security_groups.png differ diff --git a/openstack/access-and-security/images/sg_new_rule.png b/openstack/access-and-security/images/sg_new_rule.png new file mode 100644 index 00000000..fd582fe9 Binary files /dev/null and b/openstack/access-and-security/images/sg_new_rule.png differ diff --git a/openstack/access-and-security/images/sg_view.png b/openstack/access-and-security/images/sg_view.png new file mode 100644 index 00000000..4f829544 Binary files /dev/null and b/openstack/access-and-security/images/sg_view.png differ diff --git a/openstack/access-and-security/images/view_public_key.png b/openstack/access-and-security/images/view_public_key.png new file mode 100644 index 00000000..3a36e7be Binary files /dev/null and b/openstack/access-and-security/images/view_public_key.png differ diff --git a/openstack/access-and-security/security-groups/index.html b/openstack/access-and-security/security-groups/index.html new file mode 100644 index 00000000..f6ffc78e --- /dev/null +++ b/openstack/access-and-security/security-groups/index.html @@ -0,0 +1,4773 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Security Groups

+

Security groups can be thought of as firewalls. They ultimately control inbound
and outbound traffic to your virtual machines.

+

Before you launch an instance, you should add security group rules to enable
users to ping and use SSH to connect to the instance. Security groups are sets
of IP filter rules that define networking access and are applied to all
instances within a project. To do so, you can either add rules to the default
security group or add a new security group with its own rules.

+

You can view security groups by clicking Project, then clicking the Network panel and
choosing Security Groups from the tabs that appear.

+

Navigate to Project -> Network -> Security Groups.

+

You should see a ‘default’ security group. The default security group allows
traffic only between members of the security group, so by default you can
always connect between VMs in this group. However, it blocks all traffic from
outside, including incoming SSH connections. In order to access instances via a
public IP, an additional security group is needed. On the other hand, for a VM that
hosts a web server, you need a security group which allows access to ports 80
(for HTTP) and 443 (for HTTPS).

+

Security Groups

+
+

Important Note

+

We strongly advise against altering the default security group and suggest
refraining from adding extra security rules to it. This is because the
default security group is automatically assigned to any newly created VMs.
It is considered a best practice to create separate security groups for related
services, as these groups can be reused multiple times. Security groups are
highly configurable; for instance, you might create a basic/generic group
for SSH (port 22) and ICMP (which is what we will show as an example here)
and then a separate security group for HTTP (port 80) and HTTPS (port 443)
access if you're running a web service on your instance.

+
+

You can also limit access based on where the traffic originates, using either +IP addresses or security groups to define the allowed sources.

+

Create a new Security Group

+

Allowing SSH

+

To allow access to your VM for things like SSH, you will need to create a +security group and add rules to it.

+

Click on "Create Security Group". Give your new group a name, and a brief description.

+

Create a Security Group

+

You will see some existing rules:

+

Existing Security Group Rules

+

Let's create the new rule to allow SSH. Click on "Add Rule".

+

You will see there are a lot of options you can configure on the Add Rule +dialog box.

+
+

To check all available Rule

+

You can choose the desired rule template as shown under Rule dropdown options. +This will automatically select the Port required for the selected custom rule.

+

Security Group Rules Option

+
+

Adding SSH in Security Group Rules

+

Enter the following values:

+
    +
  • +

    Rule: SSH

    +
  • +
  • +

    Remote: CIDR

    +
  • +
  • +

    CIDR: 0.0.0.0/0

    +
    +

    Note

    +

    To accept requests from a particular range of IP addresses, specify the IP +address block in the CIDR box.

    +
    +
  • +
+

The new rule now appears in the list. This signifies that any instances using +this newly added Security Group will now have SSH port 22 open for requests +from any IP address.

+

Adding SSH in Security Group Rules

+
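If you have the OpenStack CLI configured for your project, the same group and rule
can also be created from the command line; a minimal sketch with an example group name:

# Create the group and open inbound SSH (TCP port 22) from any address
openstack security group create --description "Allow SSH" ssh-only
openstack security group rule create --protocol tcp --dst-port 22 \
  --remote-ip 0.0.0.0/0 ssh-only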

Allowing Ping

+

The default configuration blocks ping responses, so you will need to add an +additional group and/or rule +if you want your public IPs to respond to ping requests.

+

Ping is ICMP traffic, so the easiest way to allow it is to add a new rule and +choose "ALL ICMP" from the dropdown.

+

In the Add Rule dialog box, enter the following values:

+
    +
  • +

    Rule: All ICMP

    +
  • +
  • +

    Direction: Ingress

    +
  • +
  • +

    Remote: CIDR

    +
  • +
  • +

    CIDR: 0.0.0.0/0

    +
  • +
+

Adding ICMP - ping in Security Group Rules

+

Instances will now accept all incoming ICMP packets.

+

Allowing RDP

+

To allow access to your VM for things like Remote Desktop Protocol (RDP), you will +need to create a security group and add rules to it.

+

Click on "Create Security Group". Give your new group a name, and a brief description.

+

Create a RDP Security Group

+

You will see some existing rules:

+

Existing Security Group Rules

+

Let's create the new rule to allow RDP. Click on "Add Rule".

+

You will see there are a lot of options you can configure on the Add Rule +dialog box.

+

Choose "RDP" from the Rule dropdown option as shown below:

+

Adding RDP in Security Group Rules

+

Enter the following values:

+
    +
  • +

    Rule: RDP

    +
  • +
  • +

    Remote: CIDR

    +
  • +
  • +

    CIDR: 0.0.0.0/0

    +
  • +
+
+

Note

+

To accept requests from a particular range of IP addresses, specify the IP +address block in the CIDR box.

+
+

The new rule now appears in the list. This signifies that any instances using +this newly added Security Group will now have RDP port 3389 open for requests +from any IP address.

+

Adding RDP in Security Group Rules

+

Editing Existing Security Group and Adding New Security Rules

+
    +
  • +

    Navigate to Security Groups:

    +

    Navigate to Project -> Network -> Security Groups.

    +
  • +
  • +

    Select the Security Group:

    +

    Choose the security group to which you want to add new rules.

    +
  • +
  • +

    Add New Rule:

    +

    Look for an option to add a new rule within the selected security group.

    +

    View the security group

    +

    Specify the protocol, port range, and source/destination details for the new +rule.

    +

    Add New Security Rules

    +
  • +
  • +

    Save Changes:

    +

    Save the changes to apply the new security rules to the selected security group.

    +
  • +
+
+

Important Note

+

Security group changes may take some time to propagate to the instances +associated with the modified group. Ensure that new rules align with your +network security requirements.

+
+

Update Security Group(s) to a running VM

+

If you want to attach/detach any new Security Group(s) to a running VM after it
was launched, first create all the new Security Group(s) with the required rules as
described here. Note that the same Security Groups can be used by multiple VMs,
so don't create duplicate Security Groups with the same Security Rules, as
there are quotas per project. Once you have created all the Security Groups, you can
easily attach them to any existing VM(s). You can select the VM from the
Compute -> Instances tab and then select "Edit Security Groups" as shown below:

+

Edit Security Groups

+

Then select all Security Group(s) that you want to attach to this VM by clicking +on "+" icon and then click "Save" as shown here:

+

Select Security Groups

+
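The same attachment can also be performed with the OpenStack CLI; a quick sketch,
where the server and security group names are examples:

# Attach an existing security group to a running VM
openstack server add security group my-vm ssh-only

# Detach it again if it is no longer needed
openstack server remove security group my-vm ssh-only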
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/advanced-openstack-topics/domain-name-system/domain-names-for-your-vms/index.html b/openstack/advanced-openstack-topics/domain-name-system/domain-names-for-your-vms/index.html new file mode 100644 index 00000000..b31f271e --- /dev/null +++ b/openstack/advanced-openstack-topics/domain-name-system/domain-names-for-your-vms/index.html @@ -0,0 +1,4795 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

DNS services in NERC OpenStack

+

What is DNS?

+

The Domain Name System (DNS) is a hierarchical and distributed system for naming
resources connected to a network. It works by storing various types of records,
such as an IP address associated with a domain name.

+

DNS simplifies the communication between computers and servers through a network +and provides a user-friendly method for users to interact with and get the desired +information.

+

How to get user-friendly domain names for your NERC VMs?

+

NERC does not currently offer integrated domain name service management.

+

You can use one of the following methods to configure name resolution (DNS) for +your NERC's virtual instances.

+

1. Using freely available Dynamic DNS services

+

Get a free domain or host name from no-ip.com or other
free Dynamic DNS services.

+

Here we will describe how to use No-IP to configure dynamic DNS.

+

Step 1: Create your No-IP Account.

+

No-IP Account Signup

+

During this process you can add your desired unique hostname with a pre-existing
domain name, or you can choose to create your hostname later on.

+

Create No-IP Account

+

Step 2: Confirm Your Account by verifying your email address.

+

Activate Your Account

+

Step 3: Log In to Your Account to view your dashboard.

+

Dashboard

+

Step 4: Add Floating IP of your instance to the Hostname.

+

Click on "Modify" to add your own Floating IP attached to your NERC virtual instance.

+

Update Floating IP on Hostname

+

Then, browse to your host or domain name as you set it up during registration (or later),
i.e. http://nerc.hopto.org in the above example.

+

An easy video tutorial can be found here.

+

Having a free option is great for quickly demonstrating your project, but it comes
with the following restrictions:

+

no-ip Free vs Paid Version

+

2. Using Nginx Proxy Manager

+

You can setup Nginx Proxy Manager on one of +your NERC VMs and then use this Nginx Proxy Manager as your gateway to forward +to your other web based services.

+

Quick Setup

+

i. Launch a VM with a security group that has open rules for ports 80, 443, and 22
to enable SSH Port Forwarding, aka SSH Tunneling, i.e. Local Port Forwarding into the VM.

+

ii. SSH into your VM +using your private key after attaching a Floating IP.

+

iii. Install Docker and Docker-Compose +based on your OS choice for your VM.

+

iv. Create a docker-compose.yml file similar to this:

+
version: "3"
+services:
+    app:
+        image: "jc21/nginx-proxy-manager:latest"
+        restart: unless-stopped
+        ports:
+            - "80:80"
+            - "81:81"
+            - "443:443"
+        volumes:
+            - ./data:/data
+            - ./letsencrypt:/etc/letsencrypt
+
+

v. Bring up your stack by running:

+
docker-compose up -d
+
+# If using docker-compose-plugin
+docker compose up -d
+
+

vi. Once the docker container runs successfully, connect to it on Admin Web Port +i.e. 81 opened for the admin interface via SSH Tunneling i.e. Local Port Forwarding +from your local machine's terminal by running:

+

ssh -N -L <Your_Preferred_Port>:localhost:81 <User>@<Floating-IP> -i <Path_To_Your_Private_Key>

+

Here, you can choose any port that is available on your machine as <Your_Preferred_Port> +and then VM's assigned Floating IP as <Floating-IP> and associated Private +Key pair attached to the VM as <Path_To_Your_Private_Key>.

+

For e.g. ssh -N -L 8081:localhost:81 ubuntu@199.94.60.24 -i ~/.ssh/cloud.key

+

vii. Once the SSH Tunneling is successful, log in to the Nginx Proxy Manager +Admin UI on your web browser: +http://localhost:<Your_Preferred_Port> i.e. http://localhost:8081

+
+

Information

+

It may take some time to spin up the Admin UI. Your terminal running the SSH
Tunneling i.e. Local Port Forwarding will not show any logs or output when
successfully done. Also, you should not close or terminate the terminal while
running the tunneling session and using the Admin UI.

+
+

Default Admin User:

+
Email:    admin@example.com
+Password: changeme
+
+

Immediately after logging in with this default user you will be asked to modify +your admin details and change your password.

+

How to create a Proxy Host with Let's Encrypt SSL Certificate attached to it

+

i. Click on Hosts >> Proxy Hosts, then click on "Add Proxy Host" button as shown +below:

+

Add Proxy Hosts

+

ii. On the popup box, enter your Domain Names (these need to be registered through your
research institution or purchased from other third-party vendor services, and you must
have administrative access to them).

+
+

Important Note

+

The Domain Name need to have an A Record pointing to the public floating +IP of your NERC VM where you are hosting the Nginx Proxy Manager!

+
+
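Before continuing, you can verify from your own terminal that the A record resolves
to the VM's Floating IP; for example, with your own domain substituted:

# Should print the Floating IP of the NERC VM hosting Nginx Proxy Manager
dig +short your-domain.example.com A

# Alternative if dig is not available
nslookup your-domain.example.com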

Please fill out the following information on this popup box:

+
    +
  • +

    Scheme: http

    +
  • +
  • +

    Forward Hostname/IP: + <The Private-IP of your NERC VM where you are hosting the web services>

    +
  • +
  • +

    Forward Port: <Port exposed on your VM to the public>

    +
  • +
  • +

    Enable all toggles i.e. Cache Assets, Block Common Exploits, Websockets Support

    +
  • +
  • +

    Access List: Publicly Accessible

    +
  • +
+

For your reference, your selection should look like the example below, with your
own Domain Name and other settings:

+

Add Proxy Hosts Settings

+

Also, select the SSL tab and then "Request a new SSL Certificate" with settings +as shown below:

+

Add Proxy Hosts SSL Settings

+

iii. Save it by clicking the "Save" button. It should then show the Status as "Online", and
when you click on the created Proxy Host link it will load the web service over
https with the domain name you defined, i.e. https://<Your-Domain-Name>.

+

3. Using your local Research Computing (RC) department or academic institution's Central IT services

+

You need to contact and work with your Research Computing department or
academic institution's Central IT services to create an A record for your hostname
that maps to the address of a Floating IP of your NERC virtual instance.

+

A record: The primary DNS record used to connect your domain to an IP address +that directs visitors to your website.

+

4. Using commercial DNS providers

+

Alternatively, you can purchase a fully registered domain name or host name from
commercial hosting providers and then register DNS records for your virtual instance
with commercial cloud services, e.g. AWS Route53, Azure DNS, CloudFlare, Google Cloud
Platform, GoDaddy, etc.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/advanced-openstack-topics/domain-name-system/images/activate-your-account.png b/openstack/advanced-openstack-topics/domain-name-system/images/activate-your-account.png new file mode 100644 index 00000000..8ee1cc55 Binary files /dev/null and b/openstack/advanced-openstack-topics/domain-name-system/images/activate-your-account.png differ diff --git a/openstack/advanced-openstack-topics/domain-name-system/images/create-no-ip-account.png b/openstack/advanced-openstack-topics/domain-name-system/images/create-no-ip-account.png new file mode 100644 index 00000000..3fd2830e Binary files /dev/null and b/openstack/advanced-openstack-topics/domain-name-system/images/create-no-ip-account.png differ diff --git a/openstack/advanced-openstack-topics/domain-name-system/images/dashboard.png b/openstack/advanced-openstack-topics/domain-name-system/images/dashboard.png new file mode 100644 index 00000000..36b435fd Binary files /dev/null and b/openstack/advanced-openstack-topics/domain-name-system/images/dashboard.png differ diff --git a/openstack/advanced-openstack-topics/domain-name-system/images/floating-ip-to-hostname.png b/openstack/advanced-openstack-topics/domain-name-system/images/floating-ip-to-hostname.png new file mode 100644 index 00000000..df702eb4 Binary files /dev/null and b/openstack/advanced-openstack-topics/domain-name-system/images/floating-ip-to-hostname.png differ diff --git a/openstack/advanced-openstack-topics/domain-name-system/images/nginx-proxy-manager-add-proxy-host.png b/openstack/advanced-openstack-topics/domain-name-system/images/nginx-proxy-manager-add-proxy-host.png new file mode 100644 index 00000000..8754007e Binary files /dev/null and b/openstack/advanced-openstack-topics/domain-name-system/images/nginx-proxy-manager-add-proxy-host.png differ diff --git a/openstack/advanced-openstack-topics/domain-name-system/images/nginx-proxy-manager-proxy-host.png b/openstack/advanced-openstack-topics/domain-name-system/images/nginx-proxy-manager-proxy-host.png new file mode 100644 index 00000000..6bd88219 Binary files /dev/null and b/openstack/advanced-openstack-topics/domain-name-system/images/nginx-proxy-manager-proxy-host.png differ diff --git a/openstack/advanced-openstack-topics/domain-name-system/images/nginx-proxy-manager-ssl-setting.png b/openstack/advanced-openstack-topics/domain-name-system/images/nginx-proxy-manager-ssl-setting.png new file mode 100644 index 00000000..8c040b7d Binary files /dev/null and b/openstack/advanced-openstack-topics/domain-name-system/images/nginx-proxy-manager-ssl-setting.png differ diff --git a/openstack/advanced-openstack-topics/domain-name-system/images/no-ip-free-vs-paid.png b/openstack/advanced-openstack-topics/domain-name-system/images/no-ip-free-vs-paid.png new file mode 100644 index 00000000..6eef0ed3 Binary files /dev/null and b/openstack/advanced-openstack-topics/domain-name-system/images/no-ip-free-vs-paid.png differ diff --git a/openstack/advanced-openstack-topics/domain-name-system/images/signup.png b/openstack/advanced-openstack-topics/domain-name-system/images/signup.png new file mode 100644 index 00000000..9bddb9f8 Binary files /dev/null and b/openstack/advanced-openstack-topics/domain-name-system/images/signup.png differ diff --git a/openstack/advanced-openstack-topics/python-sdk/python-SDK/index.html b/openstack/advanced-openstack-topics/python-sdk/python-SDK/index.html new file mode 100644 index 00000000..756ee634 --- /dev/null +++ 
b/openstack/advanced-openstack-topics/python-sdk/python-SDK/index.html @@ -0,0 +1,4460 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

References

+

Python SDK page at PyPi

+

OpenStack Python SDK User Guide

+

From the Python SDK page at Pypi:

+
+

Definition

+

Python SDK is a client library for building applications to work with +OpenStack clouds. The project aims to provide a consistent and complete set of +interactions with OpenStack's many services, along with complete documentation, +examples, and tools.

+
+

If you need to plug OpenStack into existing scripts using another language, +there are a variety of other SDKs at various levels of active development.

+

A list of known SDKs is maintained on the official OpenStack wiki. +Known SDKs

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/advanced-openstack-topics/setting-up-a-network/create-a-router/index.html b/openstack/advanced-openstack-topics/setting-up-a-network/create-a-router/index.html new file mode 100644 index 00000000..f7ce43cc --- /dev/null +++ b/openstack/advanced-openstack-topics/setting-up-a-network/create-a-router/index.html @@ -0,0 +1,4545 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Create a Router

+

A router acts as a gateway for external connectivity.

+

By connecting your private network to the public network via a router, you can +connect your instance to the Internet, +install packages, etc. without needing to associate it with a public IP address.

+

You can view routers by clicking Project, then clicking the Network panel and choosing
Routers from the tabs that appear.

+

Click "Create Network" button on the right side of the screen.

+

In the Create Router dialog box, specify a name for the router.

+

From the External Network dropdown, select the ‘provider’ network, and click +"Create Router" button. This will set the Gateway for the new router to public network.

+

Create Router

+

The new router is now displayed in the Routers tab. You should now see the +router in the Network Topology view. (It also appears under Project -> Network +-> Routers).

+

Notice that it is now connected to the public network, but not your private network.

+

Router in Network

+

Set Internal Interface on the Router

+

In order to route between your private network and the outside world, you must +give the router an interface on your private network.

+

Perform the following steps to connect a private network to the
newly created router:

+

a. On the Routers tab, click the name of the router.

+

Routers

+

b. On the Router Details page, click the Interfaces tab, then click Add Interface.

+

c. In the Add Interface dialog box, select a Subnet.

+

Add Interface

+

Optionally, in the Add Interface dialog box, set an IP Address for the router +interface for the selected subnet.

+

If you choose not to set the IP Address value, then by default OpenStack +Networking uses the first host IP address in the subnet.

+

The Router Name and Router ID fields are automatically updated.

+

d. Click "Add Interface".

+

The Router will now appear connected to the private network in Network Topology tab.

+

Router connected to Private Network

+

OR,

+

You can set the internal interface on the router from the Network Topology view:
click on the router you just created, and click ‘Add Interface’ on the popup
that appears.

+

Add Interface from Network Topology

+

This will open the same Add Interface dialog box, so just complete steps b and
c as described above.

+
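If you prefer the OpenStack CLI, an equivalent sequence looks roughly like the
following; the router and subnet names are examples, while 'provider' is the
external network mentioned above:

# Create the router and set its gateway to the external (provider) network
openstack router create my-router
openstack router set --external-gateway provider my-router

# Give the router an interface on your private subnet
openstack router add subnet my-router my-subnet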
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/advanced-openstack-topics/setting-up-a-network/images/create_network.png b/openstack/advanced-openstack-topics/setting-up-a-network/images/create_network.png new file mode 100644 index 00000000..345c63bb Binary files /dev/null and b/openstack/advanced-openstack-topics/setting-up-a-network/images/create_network.png differ diff --git a/openstack/advanced-openstack-topics/setting-up-a-network/images/create_router.png b/openstack/advanced-openstack-topics/setting-up-a-network/images/create_router.png new file mode 100644 index 00000000..1adfcba7 Binary files /dev/null and b/openstack/advanced-openstack-topics/setting-up-a-network/images/create_router.png differ diff --git a/openstack/advanced-openstack-topics/setting-up-a-network/images/default-network.png b/openstack/advanced-openstack-topics/setting-up-a-network/images/default-network.png new file mode 100644 index 00000000..dc1eb4e5 Binary files /dev/null and b/openstack/advanced-openstack-topics/setting-up-a-network/images/default-network.png differ diff --git a/openstack/advanced-openstack-topics/setting-up-a-network/images/network_blank.png b/openstack/advanced-openstack-topics/setting-up-a-network/images/network_blank.png new file mode 100644 index 00000000..d46e1138 Binary files /dev/null and b/openstack/advanced-openstack-topics/setting-up-a-network/images/network_blank.png differ diff --git a/openstack/advanced-openstack-topics/setting-up-a-network/images/network_new.png b/openstack/advanced-openstack-topics/setting-up-a-network/images/network_new.png new file mode 100644 index 00000000..2ac1f2b1 Binary files /dev/null and b/openstack/advanced-openstack-topics/setting-up-a-network/images/network_new.png differ diff --git a/openstack/advanced-openstack-topics/setting-up-a-network/images/network_router.png b/openstack/advanced-openstack-topics/setting-up-a-network/images/network_router.png new file mode 100644 index 00000000..9456a898 Binary files /dev/null and b/openstack/advanced-openstack-topics/setting-up-a-network/images/network_router.png differ diff --git a/openstack/advanced-openstack-topics/setting-up-a-network/images/network_subnet.png b/openstack/advanced-openstack-topics/setting-up-a-network/images/network_subnet.png new file mode 100644 index 00000000..ec529eee Binary files /dev/null and b/openstack/advanced-openstack-topics/setting-up-a-network/images/network_subnet.png differ diff --git a/openstack/advanced-openstack-topics/setting-up-a-network/images/network_subnet_details.png b/openstack/advanced-openstack-topics/setting-up-a-network/images/network_subnet_details.png new file mode 100644 index 00000000..0ba55658 Binary files /dev/null and b/openstack/advanced-openstack-topics/setting-up-a-network/images/network_subnet_details.png differ diff --git a/openstack/advanced-openstack-topics/setting-up-a-network/images/router_add_interface.png b/openstack/advanced-openstack-topics/setting-up-a-network/images/router_add_interface.png new file mode 100644 index 00000000..0d2ac9ac Binary files /dev/null and b/openstack/advanced-openstack-topics/setting-up-a-network/images/router_add_interface.png differ diff --git a/openstack/advanced-openstack-topics/setting-up-a-network/images/router_add_interface_from_topology.png b/openstack/advanced-openstack-topics/setting-up-a-network/images/router_add_interface_from_topology.png new file mode 100644 index 00000000..238779cc Binary files /dev/null and 
b/openstack/advanced-openstack-topics/setting-up-a-network/images/router_add_interface_from_topology.png differ diff --git a/openstack/advanced-openstack-topics/setting-up-a-network/images/router_private_network_topology.png b/openstack/advanced-openstack-topics/setting-up-a-network/images/router_private_network_topology.png new file mode 100644 index 00000000..63043ef0 Binary files /dev/null and b/openstack/advanced-openstack-topics/setting-up-a-network/images/router_private_network_topology.png differ diff --git a/openstack/advanced-openstack-topics/setting-up-a-network/images/routers.png b/openstack/advanced-openstack-topics/setting-up-a-network/images/routers.png new file mode 100644 index 00000000..9e30f48f Binary files /dev/null and b/openstack/advanced-openstack-topics/setting-up-a-network/images/routers.png differ diff --git a/openstack/advanced-openstack-topics/setting-up-a-network/set-up-a-private-network/index.html b/openstack/advanced-openstack-topics/setting-up-a-network/set-up-a-private-network/index.html new file mode 100644 index 00000000..f9ca16ca --- /dev/null +++ b/openstack/advanced-openstack-topics/setting-up-a-network/set-up-a-private-network/index.html @@ -0,0 +1,4566 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Set up a Private Network

+
+

Default Network for your Project

+

During your project setup, NERC will set up a default network, router, and interface
for your project that is ready to use.

+

Default Network Topology

+
+

Create Your Own Private Network

+

You can view your existing network topology, or create a new one, by clicking Project,
then clicking the Network panel and choosing Network Topology from the tabs that appear.
This shows the public network, which is accessible to all projects.

+

Network Topology

+

Click on "Networks" tab and then click "Create Network" button on the right +side of the screen.

+

In the Create Network dialog box, specify the following values.

+
    +
  • +

    Network tab:

    +

    Network Name: Specify a name to identify the network.

    +

    Admin State: The state to start the network in.

    +

    Create Subnet: Select this check box to create a subnet

    +

    Give your network a name, and leave the two checkboxes for "Admin State" and +"Create Subnet" with the default settings.

    +

    Create a Network

    +
  • +
  • +

    Subnet tab:

    +

    You do not have to specify a subnet when you create a network, but if you do +not specify a subnet, the network can not be attached to an instance.

    +

    Subnet Name: Specify a name for the subnet.

    +

    Network Address: Specify the IP address for the subnet. For your private +networks, you should use IP addresses which fall within the ranges that are +specifically reserved for private networks:

    +
    10.0.0.0/8
    +172.16.0.0/12
    +192.168.0.0/16
    +
    +

    In the example below, we configure a network containing addresses 192.168.0.1
    to 192.168.0.255 using the CIDR 192.168.0.0/24.
    Technically, your private network will still work if you choose any IPs outside
    these ranges, but this causes problems with connecting to IPs in the outside
    world - so don't do it!

    +

    Network Topology

    +

    IP Version: Select IPv4 or IPv6.

    +

    Gateway IP: Specify an IP address for a specific gateway. This parameter is optional.

    +

    Disable Gateway: Select this check box to disable a gateway IP address.

    +
  • +
  • +

    Subnet Details tab

    +

    Enable DHCP: Select this check box to enable DHCP so that your VM instances +will automatically be assigned an IP on the subnet.

    +

    Allocation Pools: Specify IP address pools.

    +

    DNS Name Servers: Specify the IP address of a DNS name server. Here you can use
    '8.8.8.8' (you may recognize this as one of Google's public name servers).

    +

    Host Routes: Specify the IP address of host routes.

    +

    For now, you can leave the Allocation Pools and Host Routes boxes empty and
    click on the "Create" button. In this example, however, we specify an Allocation Pool
    of 192.168.0.2,192.168.0.254. (An equivalent OpenStack CLI sketch is shown after this list.)

    +

    Network Topology

    +

    The Network Topology should now show your virtual private network next to the +public network.

    +

    Newly Created Network Topology

    +
  • +
+
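For reference, roughly the same network and subnet as in the example above can be
created with the OpenStack CLI; the network and subnet names are placeholders:

# Create the private network
openstack network create my-network

# Create its subnet with the example CIDR, allocation pool, and DNS server
openstack subnet create my-subnet \
  --network my-network \
  --subnet-range 192.168.0.0/24 \
  --allocation-pool start=192.168.0.2,end=192.168.0.254 \
  --dns-nameserver 8.8.8.8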
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/index.html b/openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/index.html new file mode 100644 index 00000000..0c281061 --- /dev/null +++ b/openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/index.html @@ -0,0 +1,5054 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Virtual Machine Image Guide

+

An OpenStack Compute cloud needs to have virtual machine images in order to +launch an instance. A virtual machine image is a single file which contains a +virtual disk that has a bootable operating system installed on it.

+
+

Very Important

+

The provided Windows Server 2022 R2 image is for evaluation only. This evaluation
edition expires in 180 days. It is intended to let you evaluate whether the product
is right for you. It is at the user's discretion to update, extend, and handle
licensing issues for any future usage.

+
+
+

How to extend activation grace period for another 180 days?

+

Remote desktop to your running Windows VM. Using the search function in your
taskbar, look up Command Prompt. When you see it in the results, right-click
on it and choose Run as Administrator. Your VM's current activation grace
period can be reset by running: slmgr -rearm. Once this command has run
successfully, restart your instance for the changes to take effect. This command
typically resets the activation timer to 180 days and can be performed only a
limited number of times. For more about this, read here.

+
+

Existing Microsoft Windows Image

+

Cloudbase Solutions provides Microsoft Windows Server 2022 R2 Standard +Evaluation for OpenStack. This +includes the required support for hypervisor-specific drivers (Hyper-V / KVM). +Also integrated are the guest initialization tools (Cloudbase-Init), security +updates, proper performance, and security configurations as well as the final Sysprep.

+

How to Build and Upload your custom Microsoft Windows Image

+
+

Overall Process

+

To create a new image, you will need the installation CD or DVD ISO file for
the guest operating system. You will also need access to a virtualization tool.
You can use the KVM hypervisor for this, or, if you have a GUI desktop virtualization
tool (such as virt-manager, VMware Fusion, or VirtualBox), you can use that instead.
Convert the file to QCOW2 (KVM, Xen) once you are done.

+
+
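For the final conversion and upload steps, a rough sketch with qemu-img and the
OpenStack CLI is shown below; the file names and image name are examples, and the
CLI must already be configured for your NERC project:

# Convert the finished disk image to QCOW2 (the input format is auto-detected)
qemu-img convert -O qcow2 windows2022.img windows2022.qcow2

# Upload the converted image to your project as a private image
openstack image create --disk-format qcow2 --container-format bare \
  --file windows2022.qcow2 --private ms-windows-2022-custom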

You can customize and build the new image manually on your own system and then
upload the image to the NERC's OpenStack Compute cloud. The following steps describe
how to obtain, create, and modify virtual machine images that are compatible with
the NERC's OpenStack.

+

1. Prerequisite

+

Follow these steps to prepare the installation

+

a. Download a Windows Server 2022 installation ISO file. Evaluation images are +available on the Microsoft website +(registration required).

+

b. Download the signed VirtIO drivers ISO file from the Fedora website.

+

c. Install Virtual Machine Manager on your +local Windows 10 machine using WSL:

+
    +
  • +

    Enable WSL on your local Windows 10 subsystem for Linux:

    +

    The steps given here are straightforward; however, before following them,
    make sure that on Windows 10 you have WSL enabled and at least Ubuntu
    20.04 LTS (or a later LTS version) running on it. If you don't know how to do
    that, then see our tutorial on how to enable WSL and install Ubuntu over
    it.

    +
  • +
  • +

    Download and install MobaXterm:

    +

    MobaXterm is a free application that can be downloaded using this link. +After downloading, install it like any other normal Windows software.

    +
  • +
  • +

    Open MobaXterm and run WSL Linux:

    +

    As you open this advanced terminal for Windows 10, the WSL-installed Ubuntu
    app will show in its left side panel. Double-click on it to start
    the WSL session.

    +

    MobaXterm WSL Ubuntu-20.04 LTS

    +
  • +
  • +

    Install Virt-Manager:

    +
    sudo apt update
    +sudo apt install virt-manager
    +
    +
  • +
  • +

    Run Virtual Machine Manager:

    +

    Start the Virtual Machine Manager running this command on the opened +terminal: virt-manager as shown below:

    +

    MobaXterm start Virt-Manager

    +

    This will open Virt-Manager as follows:

    +

    Virt-Manager interface

    +
  • +
  • +

    Connect QEMU/KVM user session on Virt-Manager:

    +

    Virt-Manager Add Connection

    +

    Virt-Manager QEMU/KVM user session

    +

    Virt-Manager Connect

    +
  • +
+

2. Create a virtual machine

+

Create a virtual machine with the storage set to a 15 GB qcow2 disk image +using Virtual Machine Manager

+

Virt-Manager New Virtual Machine

+

Virt-Manager Local install media

+

Virt-Manager Browse Win ISO

+

Virt-Manager Browse Local

+

Virt-Manager Select the ISO file

+

Virt-Manager Selected ISO

+

Virt-Manager default Memory and CPU

+

Please set 15 GB disk image size as shown below:

+

Virt-Manager disk image size

+

Set the virtual machine name and also make sure "Customize configuration before +install" is selected as shown below:

+

Virt-Manager Virtual Machine Name

+

3. Customize the Virtual machine

+

Virt-Manager Customize Image

+

Enable the VirtIO driver. By default, the Windows installer does not +detect the disk.

+

Virt-Manager Disk with VirtIO driver

+

Virt-Manager Add Hardware

+

Click Add Hardware > select a CDROM device and attach it to the downloaded virtio-win-* ISO file:

+

Virt-Manager Add CDROM with virtio ISO

+

Virt-Manager Browse virtio ISO

+

Virt-Manager Select virtio ISO

+

Make sure the NIC is using the virtio Device model as shown below:

+

Virt-Manager Modify  NIC

+

Virt-Manager Apply Change on NIC

+

Make sure to set the proper order of Boot Options as shown below, so that the CDROM with the Windows ISO is first, and Apply the order change. After this, begin the Windows installation by clicking the "Begin Installation" button.

+

Windows Boot Options

+

Click the "Apply" button.

+

4. Continue with the Windows installation

+

You need to continue with the Windows installation process.

+

When prompted, choose the "Windows Server 2022 Standard Evaluation (Desktop Experience)" option as shown below:

+

Windows Desktop Installation

+

Windows Custom Installation

+

When prompted to choose an installation target, load the VirtIO SCSI and network drivers: click Load driver and browse the file system.

+

Windows Custom Load Driver

+

Browse Local Attached Drives

+

Select VirtIO CDROM

+

Select the E:\virtio-win-*\viostor\2k22\amd64 folder. When building an image file with Windows, ensure the virtio driver is installed; otherwise, you will get a blue screen when launching the image due to the missing virtio driver.

+

Select Appropriate Win Version viostor driver

+

The Windows installer displays a list of drivers to install. Select the +VirtIO SCSI drivers.

+

Windows viostor driver Installation

+

Click Load driver again and browse the file system, and select the +E:\NetKVM\2k22\amd64 folder.

+

Select Appropriate Win Version NetKVM driver

+

Select the network drivers, and continue the installation.

+

Windows NetKVM driver Installation

+

Windows Ready for Installation

+

Windows Continue Installation

+

5. Restart the installed virtual machine (VM)

+

Once the installation is complete, the VM restarts.

+

Define a password for the Administrator when prompted and click the "Finish" button:

+

Windows Administrator Login

+

Send the "Ctrl+Alt+Delete" key using the Send Key menu; this will unlock Windows and prompt for the Administrator login. Log in using the password you set in the previous step:

+

Windows Send Key

+

Administrator Login

+

Administrator Profile Finalize

+

Windows Installation Successful

+

6. Go to Device Manager and install drivers for all unrecognized devices

+

Device Manager View

+

Device Manager Update Driver

+

Device Manager Browse Driver

+

Browse To Attached virtio-win CDROM

+

Select Attached virtio-win CDROM

+

Successfully Installed Driver

+

Repeat the steps shown above to install all remaining missing drivers.

+

7. Enable Remote Desktop Protocol (RDP) login

+

Explicitly enable RDP login and uncheck the "Require computers to use Network Level Authentication to connect" option.

+

Enable RDP

+

Disable Network Level Authentication

+

8. Delete the recovery partition

+

Delete the recovery partition, which will allow expanding the image as required, by running the following commands in Command Prompt (Run as Administrator):

+
diskpart
+select disk 0
+list partition
+select partition 3
+delete partition override
+list partition
+
+

Disk Partition 3 Delete using CMD

+

and then extend the C: drive to take up the remaining space using "Disk Management".
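If you prefer to stay on the command line instead of using the Disk Management GUI, a sketch of the equivalent diskpart commands is shown below (this assumes C: is the volume you want to grow into the unallocated space freed above):

select volume C
extend
exit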

+

C Drive Extended using Disk Management

+

C Drive Extended to Take all Unallocated Space

+

C Drive on Disk Management

+

9. Install any new Windows updates. (Optional)

+

10. Setup cloudbase-init to generate QCOW2 image

+

Download and install the stable version of Cloudbase-Init (a Windows project providing guest initialization features, similar to cloud-init) by browsing to the Download Page in the web browser on the virtual machine running Windows. You can skip registering and just click on "No. just show me the downloads" to navigate to the download page, as shown below:

+

Download Cloudbase-init

+

During Installation, set Serial port for logging to COM1 as shown below:

+

Download Cloudbase-init setup for Admin

+

When the installation is done, in the Complete the Cloudbase-Init Setup Wizard +window, select the Run Sysprep and Shutdown check boxes and click "Finish" +as shown below:

+

Cloudbase-init Final Setup Options

+

Wait for the machine to shutdown.

+

Sysprep Setup in Progress

+

11. Where is the newly generated QCOW2 image?

+

Sysprep will generate the QCOW2 image, i.e. win2k22.qcow2, in /home/<YourUserName>/.local/share/libvirt/images/.
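Optionally, before uploading you can sanity-check the generated file and create a compressed copy to reduce the upload size; the output file name here is just an example, and if you use the compressed copy, pass its name to the upload command in the next step instead:

qemu-img info /home/<YourUserName>/.local/share/libvirt/images/win2k22.qcow2
qemu-img convert -c -O qcow2 /home/<YourUserName>/.local/share/libvirt/images/win2k22.qcow2 win2k22-compact.qcow2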

+

Windows QCOW2 Image

+

12. Create OpenStack image and push to NERC's image list

+

You can copy/download this Windows image to the folder where you configured your OpenStack CLI, as described here, and upload it to NERC's OpenStack by running the following OpenStack Image API command:

+
openstack image create --disk-format qcow2 --file win2k22.qcow2 MS-Windows-2022
+
+
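Optionally, you can also tag the uploaded image with standard Glance metadata so that tooling recognizes it as a Windows image; this is a hedged sketch using common image property keys, not a NERC requirement:

openstack image set --property os_type=windows --property os_distro=windows MS-Windows-2022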

You can verify the uploaded image is available by running:

+
openstack image list
+
++--------------------------------------+---------------------+--------+
+| ID                                   | Name                | Status |
++--------------------------------------+---------------------+--------+
+| a9b48e65-0cf9-413a-8215-81439cd63966 | MS-Windows-2022     | active |
+| ...                                  | ...                 | ...    |
++--------------------------------------+---------------------+--------+
+
+

13. Launch an instance using newly uploaded MS-Windows-2022 image

+

Log in to NERC's OpenStack and verify that the uploaded MS-Windows-2022 image is also available in the NERC's OpenStack Images List for your project, as shown below:

+

MS-Windows-2022 OpenStack Image

+

Create a Volume using that Windows Image:

+

MS-Windows-2022 Image to Volume Create

+

Create Volume

+

Once the volume is successfully created, we can use it to launch an instance as shown below:

+

Launch Instance from Volume

+

Add the other required information and set up a Security Group that allows RDP (port 3389) as shown below:

+

Launch Instance Security Group for RDP

+

After some time, the instance will be Active and in the Running state, as shown below:

+

Running Windows Instance

+

Attach a Floating IP to your instance:

+

Associate Floating IP

+
+

More About Floating IP

+

If you don't have any available floating IPs, please refer to +this documentation +on how to allocate a new Floating IP to your project.

+
+

Click on the detail view of the instance, then click on the Console tab and click the "Send CtrlAltDel" button located on the top right side of the console, as shown below:

+

View Console of Instance

+

Administrator Sign in Prompt

+

Administrator Prompted to Change Password

+

Set Administrator Password

+

Proceed Changed Administrator Password

+

Administrator Password Changed Successful

+

14. How to have Remote Desktop login to your Windows instance

+

Remote Desktop login should work with the Floating IP associated with the instance:
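From a local Windows machine, you can also start the connection directly from a Command Prompt instead of searching for the Remote Desktop app; replace the placeholder below with the Floating IP you associated above:

mstsc /v:<Your-Floating-IP>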

+

Search Remote Desktop Protocol locally

+

Connect to Remote Instance using Floating IP

+

Prompted Administrator Login

+

Prompted RDP connection

+

Successfully Remote Connected Instance

+

For more detailed information about OpenStack's image management, the +OpenStack image creation guide +provides further references and details.

+

Provisioning the NERC resources using Terraform

+

Terraform is an open-source Infrastructure as Code (IaC) software tool that works with NERC and allows you to orchestrate, provision, and manage infrastructure resources quickly and easily. Terraform codifies cloud application programming interfaces (APIs) into human-readable, declarative configuration (*.tf) files. These files are used to manage the underlying infrastructure rather than managing it through NERC's web-based graphical interface, Horizon. Terraform allows you to build, change, and manage your infrastructure in a safe, consistent, and repeatable way by defining resource configurations that you can version, reuse, and share. Terraform's main job is to create, modify, and destroy compute instances, private networks, and other NERC resources.

+

Benefits of Terraform

+

If you are managing multiple instances/VMs for your work or research, doing so with an automation tool like Terraform can be simpler and more reproducible.

+

Installing Terraform

+

To use Terraform you will need to install it from here.

+

Basic Template to use Terraform on your NERC Project

+

You can clone our base Terraform template with git clone https://github.com/nerc-project/terraform-nerc.git and run it to provision some basic NERC OpenStack resources using this terraform-nerc repo.

+
+

Note

+

The main branch of this git repo should be a good starting point in developing +your own terraform code.

+
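Putting the pieces together, a rough sketch of a first run with the base template looks like this (depending on the variables the repo declares, you may also need to pass a tfvars file, as described in the workflow section below):

git clone https://github.com/nerc-project/terraform-nerc.git
cd terraform-nerc
terraform init
terraform plan
terraform apply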
+

Template to setup R Shiny server using Terraform on your NERC Project

+

You can clone this template with git clone https://github.com/nerc-project/terraform-nerc-r-shiny.git and run it locally using Terraform to provision an R Shiny server on NERC's OpenStack resources using this terraform-nerc-r-shiny repo.

+
+

Important Note

+

Please make sure to review the bash script file install-R-Shiny.sh located in this repo, which is referenced by the user-data-path variable in example.tfvars. This repo includes the script required to set up the R Shiny server. You can apply the same concept to any other project that needs custom user-defined scripts when launching an instance. If you want to change or update this script, just edit this file and then run the terraform plan and terraform apply commands pointing at this example.tfvars file.

+
+

How Terraform Works

+

Terraform reads configuration files and provides an execution plan of changes, which +can be reviewed for safety and then applied and provisioned. Terraform reads all +files with the extension .tf in your current directory. Resources can be in a +single file, or organised across several different files.

+

The basic Terraform deployment workflow is:

+

i. Scope - Identify the infrastructure for your project.

+

ii. Author - Write the configuration for your infrastructure in which you +declare the elements of your infrastructure that you want to create.

+

The format of the resource definition is straightforward and looks like this:

+
resource type_of_resource "resource name" {
+    attribute = "attribute value"
+    ...
+}
+
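As an illustrative, hedged example (the resource and attribute values below are placeholders, not NERC defaults), a compute instance defined with the community OpenStack Terraform provider might look like this:

resource "openstack_compute_instance_v2" "my_vm" {
  # all values below are placeholders - adjust them to your project
  name            = "my-vm"
  image_name      = "ubuntu-22.04-x86_64"
  flavor_name     = "cpu-su.2"
  key_pair        = "username-keypair"
  security_groups = ["default"]

  network {
    name = "default_network"
  }
}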
+

iii. Initialize - Install the plugins Terraform needs to manage the infrastructure.

+

iv. Plan - Preview the changes Terraform will make to match your configuration.

+

v. Apply - Make the planned changes.

+

Running Terraform

+

The Terraform deployment workflow on the NERC looks like this:

+

Automating NERC resources using Terraform

+

Prerequisite

+
    +
  1. +

    You can download the "NERC's OpenStack RC File" with the credentials for + your NERC project from the NERC's OpenStack dashboard. + Then you need to source that RC file using: source *-openrc.sh. You can + read here + on how to do this.

    +
  2. +
  3. +

    Set up an SSH key pair by running ssh-keygen -t rsa -f username-keypair and then make sure the newly generated SSH key pair exists in your ~/.ssh folder (see the short sketch after this list).

    +
  4. +
+
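A short sketch of both prerequisite steps above; the RC file name is whatever you downloaded from the dashboard:

source ~/Downloads/PROJECT-openrc.sh
ssh-keygen -t rsa -f ~/.ssh/username-keypair
ls ~/.ssh/username-keypair*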

Terraform Init

+

The first command that should be run after writing a new Terraform configuration +or cloning an existing one is terraform init. This command is used to initialize +a working directory containing Terraform configuration files and install the plugins.

+
+

Information

+

You will need to run terraform init if you make any changes to providers.

+
+

Terraform Plan

+

The terraform plan command creates an execution plan, which lets you preview the changes that Terraform plans to make to your infrastructure based on your configuration files.

+

Terraform Apply

+

When you use terraform apply without passing it a saved plan file, it incorporates +the terraform plan command functionality and so the planning options are also +available while running this command.

+

Input Variables on the Command Line

+

You can use the -var 'NAME=VALUE' command line option to specify values for input variables declared in your root module, e.g. terraform plan -var 'name=value'.

+

In most cases, it is more convenient to set values for the potentially many input variables declared in the root module of the configuration using definitions from a "tfvars" file, passed with the -var-file=FILENAME option, e.g. terraform plan -var-file=FILENAME.
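For illustration only, such a tfvars file might look like the sketch below; these variable names are hypothetical and must match whatever the root module actually declares:

# example.tfvars - hypothetical variable names
instance_name = "my-vm"
flavor_name   = "cpu-su.2"
key_pair      = "username-keypair"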

+

Track your infrastructure and Collaborate

+

Terraform keeps track of your real infrastructure in a state file, which acts as +a source of truth for your environment. Terraform uses the state file to determine +the changes to make to your infrastructure so that it will match your configuration. +Terraform's state allows you to track resource changes throughout your deployments. +You can securely share your state with your teammates, provide a stable environment +for Terraform to run in, and prevent race conditions when multiple people make +configuration changes at once.
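For example, you can list the resources currently tracked in the state file with:

terraform state list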

+

Some useful Terraform commands

+
terraform init
+
+terraform fmt
+
+terraform validate
+
+terraform plan
+
+terraform apply
+
+terraform show
+
+terraform destroy
+
+terraform output
+
+

Backup with snapshots

+

When you start a new instance, you can choose the Instance Boot Source from the +following list:

+
    +
  • +

    boot from image

    +
  • +
  • +

    boot from instance snapshot

    +
  • +
  • +

    boot from volume

    +
  • +
  • +

    boot from volume snapshot

    +
  • +
+

In its default configuration, when the instance is launched from an Image or +an Instance Snapshot, the choice for utilizing persistent storage is configured +by selecting the Yes option for "Create New Volume". Additionally, the "Delete +Volume on Instance Delete" setting is pre-set to No, as indicated here:

+

Launching an Instance Boot Source

+
+

Very Important: How do you make your VM setup and data persistent?

+

For more in-depth information on making your VM setup and data persistent, +you can explore the details here.

+
+

Create and use Instance snapshots

+

The OpenStack snapshot mechanism allows you to create new images from your instances while they are either running or stopped. An instance snapshot captures the current state of a running VM along with its storage, configuration, and memory. It includes the VM's disk image, memory state, and any configuration settings. This is useful for preserving the entire state of a VM, including its running processes and in-memory data.

+

This mainly serves two purposes:

+
    +
  • +

    As a backup mechanism: save the main disk of your instance to an image in + Horizon dashboard under Project -> Compute -> Images and later boot a new instance + from this image with the saved data.

    +
  • +
  • +

    As a templating mechanism: customise and upgrade a base image and save it to + use as a template for new instances.

    +
  • +
+
+

Considerations: using Instance snapshots

+

Instance snapshots consume more storage space because they include the memory state, so make sure your Storage resource allocation is sufficient to hold them. They are suitable for scenarios where maintaining the exact VM state is crucial. The creation time of an instance snapshot is proportional to the size of the VM state.

+
+

How to create an instance snapshot

+

Using the CLI

+

Prerequisites:

+

To run the OpenStack CLI commands, you need to have:

+ +

To snapshot an instance to an image using the CLI, do this:

+
Using the openstack client
+
openstack server image create --name <name of my snapshot> --wait <instance name or uuid>
+
+
To view newly created snapshot image
+
openstack image show --fit-width <name of my snapshot>
+
+

Using this snapshot, the VM can be rolled back to the previous state with a +server rebuild.

+
openstack server rebuild --image <name of my snapshot> <existing instance name or uuid>
+
+

For e.g.

+
openstack server image create --name my-snapshot --wait test-nerc-0
+
+openstack image show --fit-width my-snapshot
+
+openstack server rebuild --image my-snapshot test-nerc-0
+
+
+

Important Information

+

During the time it takes to do the snapshot, the machine can become unresponsive.

+
+

Using Horizon dashboard

+

Once you're logged in to NERC's Horizon dashboard, you can create a snapshot via the "Compute -> Instances" page by clicking on the "Create snapshot" action button on the desired instance, as shown below:

+

Create Instance Snapshot

+

Instance Snapshot Information

+
+

Live snapshots and data consistency

+

We call a snapshot taken against a running instance with no downtime a "live snapshot". These snapshots are simply disk-only snapshots, and may be inconsistent if the instance's OS is not aware of the snapshot being taken. This is why we highly recommend, if possible, that you Shut Off the instance before creating snapshots.

+
+
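If you follow that recommendation from the CLI, a consistent snapshot run might look like the sketch below, reusing the instance and snapshot names from the example above:

openstack server stop test-nerc-0
openstack server image create --name my-snapshot --wait test-nerc-0
openstack server start test-nerc-0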

How to restore from Instance snapshot

+

Once created, you can find the image listed under Images in the Horizon dashboard.

+

Navigate to Project -> Compute -> Images.

+

Snapshot Instance Created

+

You have the option to launch this image as a new instance, or by clicking on the +arrow next to Launch, create a volume from the image, edit details about the +image, update the image metadata, or delete it:

+

Snapshot Instance Options

+

You can then select the snapshot when creating a new instance or directly click +"Launch" button to use the snapshot image to launch a new instance.

+

Take and use Volume Snapshots

+

Volume snapshots

+

You can also create snapshots of a volume, which can later be used to create other volumes or to roll back to a previous point in time. You can take a snapshot of a volume whether or not it is attached to an instance. Taking a snapshot of an available volume (one not attached to an instance) does not affect the data on the volume. A snapshot of a volume serves as a backup of the persistent data on the volume at a given point in time. Snapshots are the size of the actual data existing on the volume at the time the snapshot is taken. Volume snapshots are pointers in the read/write history of a volume. Creating a snapshot takes a few seconds and can be done while the volume is in use.

+
+

Warning

+

Taking snapshots of volumes that are in use or attached to active instances can result in data inconsistency on the volume. This is why we highly recommend, if possible, that you Shut Off the instance before creating snapshots.

+
+

Once you have the snapshot, you can use it to create other volumes. Creation time for these volumes may depend on the type of volume you are creating, as it may entail some data transfer. Still, this is efficient for backup and recovery of specific data without the need for the complete VM state, and it consumes less storage space compared to instance snapshots.

+

How to create a volume snapshot

+

Using the OpenStack CLI

+

Prerequisites:

+

To run the OpenStack CLI commands, you need to have:

+ +

To create a volume snapshot using the CLI, do this:

+
Using the openstack client commands
+
openstack volume snapshot create --volume <volume name or uuid> <name of my snapshot>
+
+

For e.g.

+
openstack volume snapshot create --volume test_volume my-volume-snapshot
++-------------+--------------------------------------+
+| Field       | Value                                |
++-------------+--------------------------------------+
+| created_at  | 2022-04-12T19:48:42.707250           |
+| description | None                                 |
+| id          | f1cf6846-4aba-4eb8-b3e4-2ff309f8f599 |
+| name        | my-volume-snapshot                   |
+| properties  |                                      |
+| size        | 25                                   |
+| status      | creating                             |
+| updated_at  | None                                 |
+| volume_id   | f2630d21-f8f5-4f02-adc7-14a3aa72cc9d |
++-------------+--------------------------------------+
+
+
+

Important Information

+

If the volume is in use, you may need to specify --force.

+
+

You can list the volume snapshots with the following command.

+
openstack volume snapshot list
+
+

For e.g.

+
openstack volume snapshot list
++--------------------------------------+--------------------+-------------+-----------+------+
+| ID                                   | Name               | Description | Status    | Size |
++--------------------------------------+--------------------+-------------+-----------+------+
+| f1cf6846-4aba-4eb8-b3e4-2ff309f8f599 | my-volume-snapshot | None        | available |   25 |
++--------------------------------------+--------------------+-------------+-----------+------+
+
+

Once the volume snapshot is in the available state, you can create other volumes based on it. You don't need to specify the size of the volume; it will use the size of the snapshot.

+
openstack volume create --snapshot <name of my snapshot> --description "Volume from a snapshot" <new volume name>
+
+

You can delete the snapshots just by issuing the following command

+
openstack volume snapshot delete <name of my snapshot>
+
+

For e.g.

+
openstack volume snapshot delete my-volume-snapshot
+
+

Using NERC's Horizon dashboard

+

Once you're logged in to NERC's Horizon dashboard, you can create a snapshot via the "Volumes" menu by clicking on the "Create Snapshot" action button on the desired volume, as shown below:

+

Create Volume Snapshot

+

In the dialog box that opens, enter a snapshot name and a brief description.

+

Volume Snapshot Information

+

How to restore from Volume snapshot

+

Once a snapshot is created and is in "Available" status, you can view and manage +it under the Volumes menu in the Horizon dashboard under Volume Snapshots.

+

Navigate to Project -> Volumes -> Snapshots.

+

Volume Snapshots List

+

You have the option to directly launch this volume as an instance by clicking on +the arrow next to "Create Volume" and selecting "Launch as Instance".

+

Launch an Instance from Volume Snapshot

+

There are also other options, i.e. to create a volume from the snapshot, edit details about the snapshot, delete it, or update the snapshot metadata.

+

Here, we will first Create Volume from Snapshot by clicking "Create Volume" button +as shown below:

+

Create Volume from Volume Snapshot

+

In the dialog box that opens, enter a volume name and a brief description.

+

Create Volume Popup

+

Any snapshots made into volumes can be found under Volumes:

+

Navigate to Project -> Volumes -> Volumes.

+

New Volume from Volume Snapshot

+

Then using this newly created volume, you can launch it as an instance by clicking +on the arrow next to "Edit Volume" and selecting "Launch as Instance" as shown +below:

+

Launch an Instance from Volume

+
+

Very Important: Requested/Approved Allocated Storage Quota and Cost

+

Please remember that any volumes and snapshots stored will consume your +Storage quotas, which represent the storage space allocated to your project. +For NERC (OpenStack) Resource Allocations, storage quotas are specified +by the "OpenStack Volume Quota (GiB)" and "OpenStack Swift Quota (GiB)" +allocation attributes. You can delete any volumes and snapshots that are no +longer needed to free up space. However, even if you delete volumes and snapshots, +you will still be billed based on your approved and reserved storage allocation, +which reserves storage from the total NESE storage pool.

+

If you request additional storage by specifying a changed quota value for +the "OpenStack Volume Quota (GiB)" and "OpenStack Swift Quota (GiB)" +allocation attributes through NERC's ColdFront interface, +invoicing for the extra storage will take place upon fulfillment or approval +of your request, as explained in our +Billing FAQs.

+

Conversely, if you request a reduction in the Storage quotas, specified +by the "OpenStack Volume Quota (GiB)" and "OpenStack Swift Quota (GiB)", +through a change request using ColdFront, +your invoicing will be adjusted accordingly when the request is submitted.

+

In both scenarios, 'invoicing' refers to the accumulation of hours +corresponding to the added or removed storage quantity.

+
+
+

Help Regarding Billing

+

Please send your questions or concerns regarding Storage and Cost by emailing +us at help@nerc.mghpcc.org +or, by submitting a new ticket at the NERC's Support Ticketing System.

+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/backup/images/create-instance-snapshot.png b/openstack/backup/images/create-instance-snapshot.png new file mode 100644 index 00000000..828a4d72 Binary files /dev/null and b/openstack/backup/images/create-instance-snapshot.png differ diff --git a/openstack/backup/images/create-volume-from-volume-snapshot-info.png b/openstack/backup/images/create-volume-from-volume-snapshot-info.png new file mode 100644 index 00000000..42fa961b Binary files /dev/null and b/openstack/backup/images/create-volume-from-volume-snapshot-info.png differ diff --git a/openstack/backup/images/create-volume-from-volume-snapshot.png b/openstack/backup/images/create-volume-from-volume-snapshot.png new file mode 100644 index 00000000..eb37419c Binary files /dev/null and b/openstack/backup/images/create-volume-from-volume-snapshot.png differ diff --git a/openstack/backup/images/instance-boot-source-options.png b/openstack/backup/images/instance-boot-source-options.png new file mode 100644 index 00000000..3bd0b6d1 Binary files /dev/null and b/openstack/backup/images/instance-boot-source-options.png differ diff --git a/openstack/backup/images/instance-image-snapshot.png b/openstack/backup/images/instance-image-snapshot.png new file mode 100644 index 00000000..dafd7b97 Binary files /dev/null and b/openstack/backup/images/instance-image-snapshot.png differ diff --git a/openstack/backup/images/instance-snapshot-info.png b/openstack/backup/images/instance-snapshot-info.png new file mode 100644 index 00000000..9eff3cc7 Binary files /dev/null and b/openstack/backup/images/instance-snapshot-info.png differ diff --git a/openstack/backup/images/launch-instance-from-volume-snapshot.png b/openstack/backup/images/launch-instance-from-volume-snapshot.png new file mode 100644 index 00000000..000a8227 Binary files /dev/null and b/openstack/backup/images/launch-instance-from-volume-snapshot.png differ diff --git a/openstack/backup/images/launch_instance_from_volume.png b/openstack/backup/images/launch_instance_from_volume.png new file mode 100644 index 00000000..105cecbf Binary files /dev/null and b/openstack/backup/images/launch_instance_from_volume.png differ diff --git a/openstack/backup/images/new-volume-from-snapshot.png b/openstack/backup/images/new-volume-from-snapshot.png new file mode 100644 index 00000000..1174b44a Binary files /dev/null and b/openstack/backup/images/new-volume-from-snapshot.png differ diff --git a/openstack/backup/images/snapshot-instance-options.png b/openstack/backup/images/snapshot-instance-options.png new file mode 100644 index 00000000..fb0b8b8f Binary files /dev/null and b/openstack/backup/images/snapshot-instance-options.png differ diff --git a/openstack/backup/images/volume-create-snapshot.png b/openstack/backup/images/volume-create-snapshot.png new file mode 100644 index 00000000..e5d8127d Binary files /dev/null and b/openstack/backup/images/volume-create-snapshot.png differ diff --git a/openstack/backup/images/volume-snapshot-info.png b/openstack/backup/images/volume-snapshot-info.png new file mode 100644 index 00000000..ebdba2ba Binary files /dev/null and b/openstack/backup/images/volume-snapshot-info.png differ diff --git a/openstack/backup/images/volume-snapshots-list.png b/openstack/backup/images/volume-snapshots-list.png new file mode 100644 index 00000000..6708c47a Binary files /dev/null and b/openstack/backup/images/volume-snapshots-list.png differ diff --git 
a/openstack/create-and-connect-to-the-VM/assign-a-floating-IP/index.html b/openstack/create-and-connect-to-the-VM/assign-a-floating-IP/index.html new file mode 100644 index 00000000..a68b7b4a --- /dev/null +++ b/openstack/create-and-connect-to-the-VM/assign-a-floating-IP/index.html @@ -0,0 +1,4618 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Assign a Floating IP

+

When an instance is created in OpenStack, it is automatically assigned a fixed IP address in the network to which it is attached. This IP address is permanently associated with the instance until the instance is terminated.

+

However, in addition to the fixed IP address, a Floating IP address can also be +attached to an instance. Unlike fixed IP addresses, Floating IP addresses can +have their associations modified at any time, regardless of the state of the +instances involved. Floating IPs are a limited resource, so your project will +have a quota based on its needs. +You should only assign public IPs to VMs that need them. This procedure details +the reservation of a Floating IP address from an existing pool of addresses and +the association of that address with a specific instance.

+

By attaching a Floating IP to your instance, you can SSH into your VM from your local machine.

+

Make sure you are using key forwarding as described in Create a Key Pair.
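For example, once a Floating IP is attached, a typical connection from your local machine looks like the following (a minimal sketch assuming an Ubuntu-based image; substitute the default user for your chosen image and your actual Floating IP):

ssh -A ubuntu@<Floating-IP>

If your key is not loaded into an agent, you can instead point at the private key file directly, e.g. ssh -i ~/.ssh/cloud.key ubuntu@<Floating-IP>.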

+

Allocate a Floating IP

+

Navigate to Project -> Compute -> Instances.

+

Next to Instance Name -> Click Actions dropdown arrow (far right) -> Choose +Associate Floating IP

+

Floating IP Associate

+

If you have some floating IPs already allocated to your project which are not +yet associated with a VM, they will be available in the dropdown list on this +screen.

+

Floating IP Successfully Allocated

+

If you have no floating IPs allocated, or all your allocated IPs are in use +already, the dropdown list will be empty.

+

Floating IP Not Available

+

Click the "+" icon to allocate an IP. You will see the following screen.

+

Floating IP Allocated

+

Make sure 'provider' appears in the dropdown menu, and that you have not +already met your quota of allocated IPs.

+

In this example, the project has a quota of 50 Floating IPs; 5 have been allocated so far, so we can still allocate up to 45 more.

+

Click "Allocate IP".

+

You will get a green "success" popup in the top right corner showing your new public IP address, which will then be listed as an option in the "IP Address" dropdown list.

+

Floating IP Successfully Allocated

+

You will be able to choose from multiple Floating IPs in the "IP Address" dropdown and from the ports of any unassociated VMs in the "Port to be associated" dropdown:

+

Floating IP Successfully Allocated

+

Now click the "Associate" button.

+

Then a green "success" popup appears in the top left, and you can see the Floating IP attached to your VM on the Instances page:

+

Floating IP Successfully Associated
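If you prefer the command line, the same allocation and association can be done with the openstack client (a minimal sketch; "provider" is the external network shown in the dropdown above, and the instance name and IP are placeholders):

openstack floating ip create provider
openstack server add floating ip <INSTANCE_NAME_OR_ID> <FLOATING_IP_ADDRESS>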

+
+

Floating IP Quota Exceed

+

If you have already exceeded your quota, you will get a red error message saying "You are already using all of your available floating IPs", as shown below:

+

Floating IP Quota Exceed

+

NOTE: By default, each approved project is provided with only 2 OpenStack +Floating IPs, regardless of the units requested in the quota, as +described here. +Your PI or Project Manager(s) can adjust the quota and request additional +Floating IPs as needed, following this documentation. +This is controlled by the "OpenStack Floating IP Quota" attribute.

+
+

Disassociate a Floating IP

+

You may need to disassociate a Floating IP from an instance which no longer +needs it, so you can assign it to one that does.

+

Navigate to Project -> Compute -> Instances.

+

Find the instance you want to remove the IP from in the list. Click the red +"Disassociate Floating IP" to the right.

+

This IP will be disassociated from the instance, but it will still remain +allocated to your project.

+

Floating IP Disassociate

+

Release a Floating IP

+

You may discover that your project does not need all the floating IPs that are +allocated to it.

+

You can release a Floating IP while disassociating it; just check the "Release Floating IP" option as shown here:

+

Floating IP Successfully Disassociated

+

OR,

+

Navigate to Project -> Network -> Floating IPs.

+

To release the Floating IP address back into the Floating IP pool, click the +Release Floating IP option in the Actions column.

+

Release Floating IP
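For reference, the equivalent CLI steps look roughly like this (placeholders shown; the first command only detaches the IP from the instance, while the second releases it back to the pool):

openstack server remove floating ip <INSTANCE_NAME_OR_ID> <FLOATING_IP_ADDRESS>
openstack floating ip delete <FLOATING_IP_ADDRESS>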

+
+

Pro Tip

+

You can also choose multiple Floating IPs and release them all at once.

+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/bastion_host_demo_sg.png b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/bastion_host_demo_sg.png new file mode 100644 index 00000000..7e470006 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/bastion_host_demo_sg.png differ diff --git a/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/bastion_host_security_group.png b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/bastion_host_security_group.png new file mode 100644 index 00000000..2a7ae686 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/bastion_host_security_group.png differ diff --git a/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/bastion_host_ssh_tunnel.png b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/bastion_host_ssh_tunnel.png new file mode 100644 index 00000000..a8449770 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/bastion_host_ssh_tunnel.png differ diff --git a/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/floating_ip.png b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/floating_ip.png new file mode 100644 index 00000000..6809f131 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/floating_ip.png differ diff --git a/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/private1_sg.png b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/private1_sg.png new file mode 100644 index 00000000..e8894642 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/private1_sg.png differ diff --git a/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/private_instances_sg.png b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/private_instances_sg.png new file mode 100644 index 00000000..ad3c7d23 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/private_instances_sg.png differ diff --git a/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/security_groups.png b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/security_groups.png new file mode 100644 index 00000000..69302b86 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/security_groups.png differ diff --git a/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/select_bastion_sg_as_remote.png b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/select_bastion_sg_as_remote.png new file mode 100644 index 00000000..9d8eaa5a Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/select_bastion_sg_as_remote.png differ diff --git a/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/ssh_connection_successful.png b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/ssh_connection_successful.png new file mode 100644 index 00000000..d559b22b Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/images/ssh_connection_successful.png differ diff --git a/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/index.html 
b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/index.html new file mode 100644 index 00000000..93ada871 --- /dev/null +++ b/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/index.html @@ -0,0 +1,4569 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Bastion Host

+

A bastion host is a server that provides secure access to private networks over +SSH from an external network, such as the Internet. We can leverage a bastion +host to record all SSH sessions established with private network instances +which enables auditing and can help us in efforts to comply with regulatory requirements.

+

The following diagram illustrates the concept of using an SSH bastion host to +provide access to Linux instances running inside OpenStack cloud network.

+

Bastion Host SSH tunnel

+

In OpenStack, users can deploy instances in a private tenant network. To make these instances accessible externally via the internet, the tenant must assign each instance a Floating IP address, i.e., an external public IP. Nevertheless, users may still want a way to deploy instances without having to assign a Floating IP address to every instance.

+

This is useful in the context of an OpenStack project as you don't necessarily +want to reserve a Floating IP for all your instances. This way you can isolate +certain resources so that there is only a single point of access to them and +conserve Floating IP addresses so that you don't need as big of a quota.

+

Leveraging an SSH bastion host allows this sort of configuration while still +enabling SSH access to the private instances.

+

Before trying to access instances from the outside world using SSH tunneling +via Bastion Host, you need to make sure you have followed these steps:

+
    +
  • You followed the instructions in Create a Key Pair to set up a public ssh key. You can use the same key for both the bastion host and the remote instances, or different keys; you'll just need to ensure that the keys are loaded by ssh-agent appropriately so they can be used as needed. Please read these instructions on how to start ssh-agent and load your private key using the ssh-add command to access the bastion host.
  • +
+

Verify that you have an SSH agent running; it should hold the key you used when launching your instances.

+
ssh-add -l
+
+

If you need to add the key to your agent:

+
ssh-add path/to/private/key
+
+

Now you can SSH into the bastion host:

+
ssh -A <user>@<bastion-floating-IP>
+
+
    +
  • +

    Your public ssh-key was selected (in the Access and Security tab) while + launching the instance.

    +
  • +
  • +

    Add two Security Groups: one will be used by the Bastion host and the other by any private instances.

    +
  • +
+

Security Groups

+

i. Bastion Host Security Group:

+

Allow inbound SSH (and optionally ICMP) for this security group. Make sure you have added rules in the Security Groups to allow SSH to the bastion host.

+

Bastion Host Security Group

+

ii. Private Instances Security Group:

+

You need to select "Security Group" in the Remote dropdown, and then select the "Bastion Host Security Group" under the Security Group option, as shown below:

+

Bastion Host Security Group as SG

+

Private Instances Security Group
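If you prefer to create these two security groups from the CLI, a minimal sketch (the group names here are just examples) looks like this:

# Bastion host security group: allow inbound SSH from anywhere
openstack security group create bastion_host_sg
openstack security group rule create --protocol tcp --dst-port 22 --remote-ip 0.0.0.0/0 bastion_host_sg
# Private instances security group: allow SSH only from the bastion's group
openstack security group create private_instances_sg
openstack security group rule create --protocol tcp --dst-port 22 --remote-group bastion_host_sg private_instances_sg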

+ +

Make a note of the Floating IP you have associated to your instance.

+

Floating IP

+

While adding the Bastion host and private instance, please select the appropriate Security Group, as shown below:

+

private1:

+

private1 Instance Security Group

+

bastion_host_demo:

+

Bastion Host Security Group

+

Finally, you'll want to configure the ProxyJump setting for the remote instances in your SSH configuration file (typically found in ~/.ssh/config). In the SSH configuration file, you can define multiple hosts by short aliases and specify custom ports, hostnames, users, etc. For example, let's say that you had a remote instance named "private1" and you wanted to run SSH connections through a bastion host called "bastion". The appropriate SSH configuration file might look something like this:

+
Host bastion
+  HostName 140.247.152.139
+  User ubuntu
+
+Host private1
+  Hostname 192.168.0.40
+  User ubuntu
+  ProxyJump bastion
+
+

ProxyJump makes it super simple to jump from one host to another totally transparently.
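If you'd rather not edit your SSH configuration for a one-off connection, recent OpenSSH releases accept the same jump host directly on the command line via -J (using the example addresses above):

ssh -J ubuntu@140.247.152.139 ubuntu@192.168.0.40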

+

OR,

+

If you don't have keys loaded into ssh-agent with the ssh-add command on your local machine, you can instead point to the private key with the IdentityFile option in the SSH configuration file, as shown below:

+
Host private1
+  Hostname 192.168.0.40
+  User ubuntu
+  IdentityFile ~/.ssh/cloud.key
+  ProxyJump bastion
+
+Host bastion
+  HostName 140.247.152.139
+  User ubuntu
+  IdentityFile ~/.ssh/cloud.key
+
+

With this configuration in place, when you type ssh private1 SSH will +establish a connection to the bastion host and then through the bastion host +connect to "private1", using the agent added keys or specified private keys.

+

In this sort of arrangement, SSH traffic to private servers that are not +directly accessible via SSH is instead directed through a bastion host, which +proxies the connection between the SSH client and the remote servers. The +bastion host runs on an instance that is typically in a public subnet with +attached floating public IP. Private instances are in a subnet that is not +publicly accessible, and they are set up with a security group that allows SSH +access from the security group attached to the underlying instance running the +bastion host.

+

The user won't see any of this; he or she will just see a shell for +"private1" appear. If you dig a bit further, though (try running who on the +remote node), you'll see the connections are coming from the bastion host, not +the original SSH client.

+

Successful SSH Connection

+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/create-and-connect-to-the-VM/create-a-Windows-VM/index.html b/openstack/create-and-connect-to-the-VM/create-a-Windows-VM/index.html new file mode 100644 index 00000000..fc72d4ed --- /dev/null +++ b/openstack/create-and-connect-to-the-VM/create-a-Windows-VM/index.html @@ -0,0 +1,4986 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Create a Windows virtual machine

+

Launch an Instance using a boot volume

+

In this example, we will illustrate how to utilize a boot volume to launch a Windows virtual machine; similar steps can be used for other types of virtual machines. The following steps show how to create a virtual machine that boots from an external volume:

+
    +
  • +

    Create a volume with source data from the image

    +
  • +
  • +

    Launch a VM with that volume as the system disk

    +
  • +
+
+

Recommendations

+
    +
  • +

    The recommended method to create a Windows desktop virtual machine is boot +from volume, although you can also launch a Windows-based instance following +the normal process using boot from image as described here.

    +
  • +
  • +

    To ensure smooth upgrade and maintenance of the system, select at least +100 GiB for the size of the volume.

    +
  • +
  • +

    Make sure your project has sufficient storage quotas.

    +
  • +
+
+

Create a volume from image

+

1. Using NERC's Horizon dashboard

+

Navigate: Project -> Compute -> Images.

+

Make sure you are able to see MS-Windows-2022 is available on Images List for +your project as shown below:

+

MS-Windows-2022 OpenStack Image

+

Create a Volume using that Windows Image:

+

MS-Winodws-2022 Image to Volume Create

+

To ensure smooth upgrade and maintenance of the system, select at least 100 GiB +for the size of the volume as shown below:

+

Create Volume

+

2. Using the OpenStack CLI

+

Prerequisites:

+

To run the OpenStack CLI commands, you need to have:

+ +

To create a volume from image using the CLI, do this:

+

Using the openstack client commands

+

Identify the image for the initial volume contents from openstack image list.

+
openstack image list
++--------------------------------------+---------------------+--------+
+| ID                                   | Name                | Status |
++--------------------------------------+---------------------+--------+
+| a9b48e65-0cf9-413a-8215-81439cd63966 | MS-Windows-2022     | active |
+...
++--------------------------------------+---------------------+--------+
+
+

In the example above, this is image id a9b48e65-0cf9-413a-8215-81439cd63966 for +MS-Windows-2022.

+

Create a volume from this image, with a size of 100 GiB, named "my-volume" as follows.

+
openstack volume create --image a9b48e65-0cf9-413a-8215-81439cd63966 --size 100 --description "Using MS Windows Image" my-volume
++---------------------+--------------------------------------+
+| Field               | Value                                |
++---------------------+--------------------------------------+
+| attachments         | []                                   |
+| availability_zone   | nova                                 |
+| bootable            | false                                |
+| consistencygroup_id | None                                 |
+| created_at          | 2024-02-03T23:38:50.000000           |
+| description         | Using MS Windows Image               |
+| encrypted           | False                                |
+| id                  | d8a5da4c-41c8-4c2d-b57a-8b6678ce4936 |
+| multiattach         | False                                |
+| name                | my-volume                            |
+| properties          |                                      |
+| replication_status  | None                                 |
+| size                | 100                                  |
+| snapshot_id         | None                                 |
+| source_volid        | None                                 |
+| status              | creating                             |
+| type                | tripleo                              |
+| updated_at          | None                                 |
+| user_id             | 938eb8bfc72e4cb3ad2b94e2eb4059f7     |
++---------------------+--------------------------------------+
+
+

Checking the status again using openstack volume show my-volume will allow the volume creation to be followed.

+

"downloading" means that the volume contents are being transferred from the image service to the volume service.

+

"available" means the volume can now be used for booting. A set of volume_image metadata is also copied from the image service.

+
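While you wait, you can poll just the status field from the CLI (a small sketch using the volume created above):

openstack volume show my-volume -c status
# repeat until the status changes from "downloading" to "available"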

Launch instance from existing bootable volume

+

1. Using Horizon dashboard

+

Navigate: Project -> Volumes -> Volumes.

+

Once the volume is successfully created, we can use it to launch an instance, as shown below:

+

Launch Instance from Volume

+
+

How do you make your VM setup and data persistent?

+

Only one instance at a time can be booted from a given volume. Make sure +"Delete Volume on Instance Delete" is selected as No if you want the +volume to persist even after the instance is terminated, which is the +default setting, as shown below:

+

Instance Persistent Storage Option

+

NOTE: For more in-depth information on making your VM setup and data persistent, +you can explore the details here.

+
+

Add other information and set up a Security Group that allows RDP (port: 3389), as shown below:

+

Launch Instance Security Group for RDP

+
+

Very Important: Setting Administrator Credentials to Log into Your VM.

+

To access this Windows VM, you must log in using Remote Desktop, as +described here. +To configure a password for the "Administrator" user account, proceed to the +"Configuration" section and enter the supplied PowerShell-based Customized Script. +Make sure to substitute <Your_Own_Admin_Password> with your preferred password, +which will enable Remote Desktop login to the Windows VM.

+
#ps1
+
+net user Administrator <Your_Own_Admin_Password>
+
+

Please ensure that your script in the "Configuration" section resembles the +following syntax:

+

Setting Administrator Password Custom Script

+
+

After some time, the instance will be Active and in the Running state, as shown below:

+

Running Windows Instance

+

Attach a Floating IP to your instance:

+

Associate Floating IP

+

2. Using the OpenStack CLI from the terminal

+

Prerequisites:

+

To run the OpenStack CLI commands, you need to have:

+ +

To launch an instance from existing bootable volume using the CLI, do this:

+

Using the openstack client commands from terminal

+

Get the flavor name using openstack flavor list:

+
openstack flavor list | grep cpu-su.4
+| b3f5dded-efe3-4630-a988-2959b73eba70 | cpu-su.4      |  16384 |   20 |         0 |     4 | True      |
+
+

To access this Windows VM, you must log in using Remote Desktop, as +described here. Before +launching the VM using the OpenStack CLI, we'll prepare a PowerShell-based Customized +Script as "user-data".

+
+

What is a user data file?

+

A user data file is a text file that you can include when running the +openstack server create command. This file is used to customize your +instance during boot.

+
+

You can place user data in a local file and pass it through the +--user-data <user-data-file> parameter at instance creation. You'll create a +local file named admin_password.ps1 with the following content. Please remember +to replace <Your_Own_Admin_Password> with your chosen password, which will be +used to log in to the Windows VM via Remote Desktop.

+
#ps1
+
+net user Administrator <Your_Own_Admin_Password>
+
+

To set up a Security Group named "rdp_test" that allows RDP (port 3389) using the CLI, use the command openstack security group create <group-name>:

+
openstack security group create --description 'Allows RDP' rdp_test
+
+openstack security group rule create --protocol tcp --dst-port 3389 rdp_test
+
+

To create a Windows VM named "my-vm" using the specified parameters, including the +flavor name "cpu-su.4", existing key pair "my-key", security group "rdp_test", +user data from the file "admin_password.ps1" created above, and the volume with +name "my-volume" created above, you can run the following command:

+
openstack server create --flavor cpu-su.4 \
+    --key-name my-key \
+    --security-group rdp_test \
+    --user-data admin_password.ps1 \
+    --volume my-volume \
+    my-vm
+
+

To list all Floating IP addresses that are allocated to the current project, run:

+
openstack floating ip list
+
++--------------------------------------+---------------------+------------------+------+
+| ID                                   | Floating IP Address | Fixed IP Address | Port |
++--------------------------------------+---------------------+------------------+------+
+| 760963b2-779c-4a49-a50d-f073c1ca5b9e | 199.94.60.220       | 192.168.0.195    | None |
++--------------------------------------+---------------------+------------------+------+
+
+
+

More About Floating IP

+

If the above command returns an empty list, meaning you don't have any +available floating IPs, please refer to this documentation +on how to allocate a new Floating IP to your project.

+
+

Attach a Floating IP to your instance:

+
openstack server add floating ip INSTANCE_NAME_OR_ID FLOATING_IP_ADDRESS
+
+

For example:

+
openstack server add floating ip my-vm 199.94.60.220
+
+

Accessing the graphical console in the Horizon dashboard

+

You can access the graphical console using the browser once the VM is in status +ACTIVE. It can take up to 15 minutes to reach this state.

+

The console is accessed by selecting the Instance Details for the machine and the +'Console' tab as shown below:

+

View Console of Instance

+

Administrator Sign in Prompt

+

How to add Remote Desktop login to your Windows instance

+

When the build and the Windows installation steps have completed, you can access +the console using the Windows Remote Desktop application. Remote Desktop login +should work with the Floating IP associated with the instance:

+

Search Remote Desktop Protocol locally

+

Connect to Remote Instance using Floating IP

+

Prompted Administrator Login
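For example, from a Windows client you can launch the Remote Desktop client directly against the Floating IP from the command line (the address below is the example Floating IP allocated earlier; yours will differ):

mstsc /v:199.94.60.220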

+
+

What is the user login for Windows Server 2022?

+

The default username is "Administrator," and the password is the one you set +using the user data PowerShell script during the launch.

+
+

Prompted RDP connection

+

Successfully Remote Connected Instance

+
+

Storage and Volume

+
    +
  • +

    System disks are the first disk, sized according to the flavor's disk space, and are generally used to store the operating system created from an image when the virtual machine is booted.

    +
  • +
  • +

    Volumes are +persistent virtualized block devices independent of any particular instance. +Volumes may be attached to a single instance at a time, but may be detached +or reattached to a different instance while retaining all data, much like a +USB drive. The size of the volume can be selected when it is created within +the storage quota limits for the particular resource allocation.

    +
  • +
+
+

Connect additional disk using volume

+

To attach additional disk to a running Windows machine you can follow +this documentation. +This guide +provides instructions on formatting and mounting a volume as an attached disk +within a Windows virtual machine.
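As a rough CLI sketch (names and size are examples), creating an additional data volume and attaching it to the Windows VM created earlier looks like this; formatting and mounting are then done inside Windows as described in the guide above:

openstack volume create --size 100 my-data-volume
openstack server add volume my-vm my-data-volume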

+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/create-and-connect-to-the-VM/flavors/index.html b/openstack/create-and-connect-to-the-VM/flavors/index.html new file mode 100644 index 00000000..a11ab397 --- /dev/null +++ b/openstack/create-and-connect-to-the-VM/flavors/index.html @@ -0,0 +1,5319 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Nova flavors

+

In NERC OpenStack, flavors define the compute, memory, and storage capacity of +nova computing instances. In other words, a flavor is an available hardware +configuration for a server.

+
+

Note

+

Flavors are visible only while you are launching an instance, under the "Flavor" tab, as explained here.

+
+

The important fields are:

| Field | Description |
| --- | --- |
| RAM | Memory size in MiB |
| Disk | Size of disk in GiB |
| Ephemeral | Size of a second disk. 0 means no second disk is defined and mounted. |
| VCPUs | Number of virtual cores |
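You can also inspect these fields for any flavor from the command line with the openstack client (assuming you have the CLI configured as described in the CLI sections of this documentation), for example:

openstack flavor show cpu-su.1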
+

Comparison Between CPU and GPU

+

Here are the key differences between CPUs and GPUs:

| CPUs | GPUs |
| --- | --- |
| Work mostly in sequence. While several cores and excellent task switching give the impression of parallelism, a CPU is fundamentally designed to run one task at a time. | Are designed to work in parallel. A vast number of cores and threading managed in hardware enable GPUs to perform many simple calculations simultaneously. |
| Are designed for task parallelism. | Are designed for data parallelism. |
| Have a small number of cores that can complete single complex tasks at very high speeds. | Have a large number of cores that work in tandem to compute many simple tasks. |
| Have access to a large amount of relatively slow RAM with low latency, optimizing them for latency (operation). | Have access to a relatively small amount of very fast RAM with higher latency, optimizing them for throughput. |
| Have a very versatile instruction set, allowing the execution of complex tasks in fewer cycles but creating overhead in others. | Have a limited (but highly optimized) instruction set, allowing them to execute their designed tasks very efficiently. |
| Task switching (as a result of running the OS) creates overhead. | Task switching is not used; instead, numerous serial data streams are processed in parallel from point A to point B. |
| Will always work for any given use case but may not provide adequate performance for some tasks. | Would only be a valid choice for some use cases but would provide excellent performance in those cases. |
+

In summary, for applications such as Machine Learning (ML), Artificial +Intelligence (AI), or image processing, a GPU can provide a performance increase +of 50x to 200x compared to a typical CPU performing the same tasks.

+

Currently, our setup supports and offers the following flavors

+

NERC offers the following flavors based on our Infrastructure-as-a-Service +(IaaS) - OpenStack offerings (Tiers of Service).

+
+

Pro Tip

+

Choose a flavor for your instance from the available Tier that suits your +requirements, use-cases, and budget when launching a VM as shown here.

+
+

1. Standard Compute Tier

+

The standard compute flavor "cpu-su" is provided from Lenovo SD530 (2x Intel +8268 2.9 GHz, 48 cores, 384 GB memory) server. The base unit is 1 vCPU, 4 GB +memory with default of 20 GB root disk at a rate of $0.013 / hr of wall time.

| Flavor | SUs | GPU | vCPU | RAM (GiB) | Storage (GiB) | Cost / hr |
| --- | --- | --- | --- | --- | --- | --- |
| cpu-su.1 | 1 | 0 | 1 | 4 | 20 | $0.013 |
| cpu-su.2 | 2 | 0 | 2 | 8 | 20 | $0.026 |
| cpu-su.4 | 4 | 0 | 4 | 16 | 20 | $0.052 |
| cpu-su.8 | 8 | 0 | 8 | 32 | 20 | $0.104 |
| cpu-su.16 | 16 | 0 | 16 | 64 | 20 | $0.208 |
+

2. Memory Optimized Tier

+

The memory optimized flavor "mem-su" is provided from the same servers at +"cpu-su" but with 8 GB of memory per core. The base unit is 1 vCPU, 8 GB +memory with default of 20 GB root disk at a rate of $0.026 / hr of wall time.

| Flavor | SUs | GPU | vCPU | RAM (GiB) | Storage (GiB) | Cost / hr |
| --- | --- | --- | --- | --- | --- | --- |
| mem-su.1 | 1 | 0 | 1 | 8 | 20 | $0.026 |
| mem-su.2 | 2 | 0 | 2 | 16 | 20 | $0.052 |
| mem-su.4 | 4 | 0 | 4 | 32 | 20 | $0.104 |
| mem-su.8 | 8 | 0 | 8 | 64 | 20 | $0.208 |
| mem-su.16 | 16 | 0 | 16 | 128 | 20 | $0.416 |
+

3. GPU Tier

+

NERC also supports the most demanding workloads including Artificial Intelligence +(AI), Machine Learning (ML) training and Deep Learning modeling, simulation, +data analytics, data visualization, distributed databases, and more. For such +demanding workloads, the NERC's GPU-based distributed computing flavor is +recommended, which is integrated into a specialized hardware such as GPUs +that produce unprecedented performance boosts for technical computing workloads.

+
+

Guidelines for Utilizing GPU-Based Flavors in Active Resource Allocation

+

To effectively utilize GPU-based flavors on any NERC (OpenStack) resource allocation, +the Principal Investigator (PI) or project manager(s) must submit a +change request +for their currently active NERC (OpenStack) resource allocation. This request +should specify the number of GPUs they intend to use by setting the "OpenStack +GPU Quota" attribute. We recommend ensuring that this count accurately reflects +the current GPU usage. Additionally, they need to adjust the quota values for +"OpenStack Compute RAM Quota (MiB)" and "OpenStack Compute vCPU Quota" to sufficiently +accommodate the GPU flavor they wish to use when launching a VM in their +OpenStack Project.

+

Once the change request is reviewed and approved by the NERC's admin, users +will be able to select the appropriate GPU-based flavor during the flavor +selection tab +when launching a new VM.

+
+

There are four different options within the GPU tier, featuring the newer +NVIDIA A100 SXM4, NVIDIA A100s, NVIDIA V100s, and NVIDIA K80s.

+
+

How can I get customized A100 SXM4 GPUs not listed in the current flavors?

+

We also provide customized A100 SXM4 GPU-based flavors, which are not publicly +listed on our NVIDIA A100 SXM4 40GB GPU Tiers list. These options are exclusively +available for demanding projects and are subject to availability.

+

To request access, please fill out this form. +Our team will review your request and reach out to you to discuss further.

+
+

i. NVIDIA A100 SXM4 40GB

+

The "gpu-su-a100sxm4" flavor is provided from Lenovo SD650-N V2 (2x Intel Xeon +Platinum 8358 32C 250W 2.6GHz, 128 cores, 1024 GB RAM 4x NVIDIA HGX A100 40GB) servers. +The higher number of tensor cores available can significantly enhance the speed +of machine learning applications. The base unit is 32 vCPU, 240 GB memory with +default of 20 GB root disk at a rate of $2.078 / hr of wall time.

| Flavor | SUs | GPU | vCPU | RAM (GiB) | Storage (GiB) | Cost / hr |
| --- | --- | --- | --- | --- | --- | --- |
| gpu-su-a100sxm4.1 | 1 | 1 | 32 | 240 | 20 | $2.078 |
| gpu-su-a100sxm4.2 | 2 | 2 | 64 | 480 | 20 | $4.156 |
+
+

How to setup NVIDIA driver for "gpu-su-a100sxm4" flavor based VM?

+

After launching a VM with an NVIDIA A100 SXM4 GPU flavor, you will need to +setup the NVIDIA driver in order to use GPU-based codes and libraries. +Please run the following commands to setup the NVIDIA driver and CUDA +version required for these flavors in order to execute GPU-based codes. +NOTE: These commands are ONLY applicable for the VM based on +"ubuntu-22.04-x86_64" image. You might need to find corresponding +packages for your own OS of choice.

+
sudo apt update
+sudo apt -y install nvidia-driver-495
+# Just click *Enter* if any popups appear!
+# Confirm and verify that you can see the NVIDIA device attached to your VM
+lspci | grep -i nvidia
+# 00:05.0 3D controller: NVIDIA Corporation GA100 [A100 SXM4 40GB] (rev a1)
+sudo reboot
+# SSH back to your VM and then you will be able to use nvidia-smi command
+nvidia-smi
+
+
+

ii. NVIDIA A100 40GB

+

The "gpu-su-a100" flavor is provided from Lenovo SR670 (2x Intel 8268 2.9 GHz, +48 cores, 384 GB memory, 4x NVIDIA A100 40GB) servers. These latest GPUs deliver +industry-leading high throughput and low latency networking. The base unit is 24 +vCPU, 74 GB memory with default of 20 GB root disk at a rate of $1.803 / hr of +wall time.

| Flavor | SUs | GPU | vCPU | RAM (GiB) | Storage (GiB) | Cost / hr |
| --- | --- | --- | --- | --- | --- | --- |
| gpu-su-a100.1 | 1 | 1 | 24 | 74 | 20 | $1.803 |
| gpu-su-a100.2 | 2 | 2 | 48 | 148 | 20 | $3.606 |
+
+

How to setup NVIDIA driver for "gpu-su-a100" flavor based VM?

+

After launching a VM with an NVIDIA A100 GPU flavor, you will need to +setup the NVIDIA driver in order to use GPU-based codes and libraries. +Please run the following commands to setup the NVIDIA driver and CUDA +version required for these flavors in order to execute GPU-based codes. +NOTE: These commands are ONLY applicable for the VM based on +"ubuntu-22.04-x86_64" image. You might need to find corresponding +packages for your own OS of choice.

+
sudo apt update
+sudo apt -y install nvidia-driver-495
+# Just click *Enter* if any popups appear!
+# Confirm and verify that you can see the NVIDIA device attached to your VM
+lspci | grep -i nvidia
+# 0:05.0 3D controller: NVIDIA Corporation GA100 [A100 PCIe 40GB] (rev a1)
+sudo reboot
+# SSH back to your VM and then you will be able to use nvidia-smi command
+nvidia-smi
+
+
+

iii. NVIDIA V100 32GB

+

The "gpu-su-v100" flavor is provided from Dell R740xd (2x Intel Xeon Gold 6148, +40 cores, 768GB memory, 1x NVIDIA V100 32GB) servers. The base unit is 48 vCPU, +192 GB memory with default of 20 GB root disk at a rate of $1.214 / hr of wall time.

| Flavor | SUs | GPU | vCPU | RAM (GiB) | Storage (GiB) | Cost / hr |
| --- | --- | --- | --- | --- | --- | --- |
| gpu-su-v100.1 | 1 | 1 | 48 | 192 | 20 | $1.214 |
+
+

How to setup NVIDIA driver for "gpu-su-v100" flavor based VM?

+

After launching a VM with an NVIDIA V100 GPU flavor, you will need to +setup the NVIDIA driver in order to use GPU-based codes and libraries. +Please run the following commands to setup the NVIDIA driver and CUDA +version required for these flavors in order to execute GPU-based codes. +NOTE: These commands are ONLY applicable for the VM based on +"ubuntu-22.04-x86_64" image. You might need to find corresponding +packages for your own OS of choice.

+
sudo apt update
+sudo apt -y install nvidia-driver-470
+# Just click *Enter* if any popups appear!
+# Confirm and verify that you can see the NVIDIA device attached to your VM
+lspci | grep -i nvidia
+# 00:05.0 3D controller: NVIDIA Corporation GV100GL [Tesla V100 PCIe 32GB] (rev a1)
+sudo reboot
+# SSH back to your VM and then you will be able to use nvidia-smi command
+nvidia-smi
+
+
+

iv. NVIDIA K80 12GB

+

The "gpu-su-k80" flavor is provided from Supermicro X10DRG-H (2x Intel +E5-2620 2.40GHz, 24 cores, 128GB memory, 4x NVIDIA K80 12GB) servers. The base unit +is 6 vCPU, 28.5 GB memory with default of 20 GB root disk at a rate of $0.463 / +hr of wall time.

| Flavor | SUs | GPU | vCPU | RAM (GiB) | Storage (GiB) | Cost / hr |
| --- | --- | --- | --- | --- | --- | --- |
| gpu-su-k80.1 | 1 | 1 | 6 | 28.5 | 20 | $0.463 |
| gpu-su-k80.2 | 2 | 2 | 12 | 57 | 20 | $0.926 |
| gpu-su-k80.4 | 4 | 4 | 24 | 114 | 20 | $1.852 |
+
+

How to setup NVIDIA driver for "gpu-su-k80" flavor based VM?

+

After launching a VM with an NVIDIA K80 GPU flavor, you will need to +setup the NVIDIA driver in order to use GPU-based codes and libraries. +Please run the following commands to setup the NVIDIA driver and CUDA +version required for these flavors in order to execute GPU-based codes. +NOTE: These commands are ONLY applicable for the VM based on +"ubuntu-22.04-x86_64" image. You might need to find corresponding +packages for your own OS of choice.

+
sudo apt update
+sudo apt -y install nvidia-driver-470
+# Just click *Enter* if any popups appear!
+# Confirm and verify that you can see the NVIDIA device attached to your VM
+lspci | grep -i nvidia
+# 00:05.0 3D controller: NVIDIA Corporation GK210GL [Tesla K80] (rev a1)
+sudo reboot
+# SSH back to your VM and then you will be able to use nvidia-smi command
+nvidia-smi
+
+
+
+

NERC IaaS Storage Tiers Cost

+

Storage, both OpenStack Swift (object storage) and Cinder (block storage/volumes), is charged separately at a rate of $0.009 / TiB/hr, i.e., $9.00E-6 / GiB/hr. More about cost can be found here, and some of the common billing-related FAQs are listed here.
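As a rough worked example of that rate, keeping a 100 GiB volume allocated for a 30-day month (720 hours) works out to approximately 100 GiB x $9.00E-6 / GiB/hr x 720 hr ≈ $0.65.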

+
+

How can I get customized A100 SXM4 GPUs not listed in the current flavors?

+

We also provide customized A100 SXM4 GPU-based flavors, which are not publicly +listed on our NVIDIA A100 SXM4 40GB GPU Tiers list. These options are exclusively +available for demanding projects and are subject to availability.

+

To request access, please fill out this form. +Our team will review your request and reach out to you to discuss further.

+

How to Change Flavor of an instance

+

Using Horizon dashboard

+

Once you're logged in to NERC's Horizon dashboard, you can navigate to +Project -> Compute -> Instances.

+

Select the instance whose flavor you wish to change. You will see several options available under the Actions menu, located on the right-hand side of your instance, as shown here:

+

Resize VM's Instance

+

Click "Resize Instance".

+

In the Resize Instance dialog box, select the new flavor of your choice under the +"New Flavor" dropdown options. In this example, we are changing the current flavor +"cpu-su.1" to the new flavor "cpu-su.2" for our VM, as shown below:

+

Resize Instance Dialog

+

Once you have reviewed and verified the new flavor details, press the "Resize" button.

+
+

Very Important Information

+

You will only be able to choose flavors that are within your current available +resource quotas, i.e., vCPUs and RAM.

+
+

You will see the status of the resize in the following page.

+

When it says "Confirm or Revert Resize/Migrate", log in to the instance and verify that it worked as intended (meaning the instance is working as before but with the new flavor).
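For a Linux instance, a quick sanity check from inside the VM (standard commands on most distributions) is to confirm that the vCPU count and memory match the new flavor:

nproc
free -h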

+

If you are happy with the result, press "Confirm Resize/Migrate" in the drop-down to the far right (it should be pre-selected), as shown below:

+

Confirm Resize/Migrate

+

This will finalise the process and make it permanent.

+

If you are unhappy (for some reason the process failed), you are able to instead +press "Revert resize/Migrate" (available in the drop-down). This will revert the +process.

+

Using the CLI

+

Prerequisites:

+

To run the OpenStack CLI commands, you need to have:

+ +

If you want to change the flavor that is bound to a VM, you can run the following openstack client commands. Here we are changing the flavor of an existing VM named "test-vm" from mem-su.2 to mem-su.4:

+

First, stop the running VM using:

+
openstack server stop test-vm
+
+

Then, verify the status is "SHUTOFF" and also the used flavor is mem-su.2 as +shown below:

+
openstack server list
++--------------------------------------+------+---------+--------------------------------------------+--------------------------+---------+
+| ID | Name | Status | Networks | Image | Flavor |
++--------------------------------------+------+---------+--------------------------------------------+--------------------------+---------+
+| cd51dbba-fe95-413c-9afc-71370be4d4fd | test-vm | SHUTOFF | default_network=192.168.0.58, 199.94.60.10 | N/A (booted from volume) | mem-su.2 |
++--------------------------------------+------+---------+--------------------------------------------+--------------------------+---------+
+
+

Then, resize the flavor from mem-su.2 to mem-su.4 by running:

+
openstack server resize --flavor mem-su.4 cd51dbba-fe95-413c-9afc-71370be4d4fd
+
+

Confirm the resize:

+
openstack server resize confirm cd51dbba-fe95-413c-9afc-71370be4d4fd
+
+

Then, start the VM:

+
openstack server start cd51dbba-fe95-413c-9afc-71370be4d4fd
+
+

Verify the VM is using the new flavor of mem-su.4 as shown below:

+
openstack server list
++--------------------------------------+------+--------+--------------------------------------------+--------------------------+---------+
+| ID | Name | Status | Networks | Image | Flavor |
++--------------------------------------+------+--------+--------------------------------------------+--------------------------+---------+
+| cd51dbba-fe95-413c-9afc-71370be4d4fd | test-vm | ACTIVE | default_network=192.168.0.58, 199.94.60.10 | N/A (booted from volume) | mem-su.4 |
++--------------------------------------+------+--------+--------------------------------------------+--------------------------+---------+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/create-and-connect-to-the-VM/images/RDP_on_local_machine.png b/openstack/create-and-connect-to-the-VM/images/RDP_on_local_machine.png new file mode 100644 index 00000000..68e9920c Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/RDP_on_local_machine.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/administrator_singin_prompt.png b/openstack/create-and-connect-to-the-VM/images/administrator_singin_prompt.png new file mode 100644 index 00000000..30ddf2bd Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/administrator_singin_prompt.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/console_win_instance.png b/openstack/create-and-connect-to-the-VM/images/console_win_instance.png new file mode 100644 index 00000000..980438ad Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/console_win_instance.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/create_volume.png b/openstack/create-and-connect-to-the-VM/images/create_volume.png new file mode 100644 index 00000000..d9e32f98 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/create_volume.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/flavor-not-available-due-to-quota.png b/openstack/create-and-connect-to-the-VM/images/flavor-not-available-due-to-quota.png new file mode 100644 index 00000000..44b75fb7 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/flavor-not-available-due-to-quota.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/floating_ip_allocate.png b/openstack/create-and-connect-to-the-VM/images/floating_ip_allocate.png new file mode 100644 index 00000000..af5b2412 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/floating_ip_allocate.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/floating_ip_allocate_success.png b/openstack/create-and-connect-to-the-VM/images/floating_ip_allocate_success.png new file mode 100644 index 00000000..a7400978 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/floating_ip_allocate_success.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/floating_ip_associate.png b/openstack/create-and-connect-to-the-VM/images/floating_ip_associate.png new file mode 100644 index 00000000..ba1ee567 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/floating_ip_associate.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/floating_ip_created_successfully.png b/openstack/create-and-connect-to-the-VM/images/floating_ip_created_successfully.png new file mode 100644 index 00000000..c32f70af Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/floating_ip_created_successfully.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/floating_ip_disassociate.png b/openstack/create-and-connect-to-the-VM/images/floating_ip_disassociate.png new file mode 100644 index 00000000..fef78155 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/floating_ip_disassociate.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/floating_ip_disassociate_release.png b/openstack/create-and-connect-to-the-VM/images/floating_ip_disassociate_release.png new file mode 100644 index 00000000..55d06eb4 Binary files /dev/null and 
b/openstack/create-and-connect-to-the-VM/images/floating_ip_disassociate_release.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/floating_ip_is_associated.png b/openstack/create-and-connect-to-the-VM/images/floating_ip_is_associated.png new file mode 100644 index 00000000..8edb8a1a Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/floating_ip_is_associated.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/floating_ip_none.png b/openstack/create-and-connect-to-the-VM/images/floating_ip_none.png new file mode 100644 index 00000000..95c1f775 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/floating_ip_none.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/floating_ip_quota_exceed.png b/openstack/create-and-connect-to-the-VM/images/floating_ip_quota_exceed.png new file mode 100644 index 00000000..a81900e4 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/floating_ip_quota_exceed.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/floating_ip_release.png b/openstack/create-and-connect-to-the-VM/images/floating_ip_release.png new file mode 100644 index 00000000..ed53854e Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/floating_ip_release.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/index.html b/openstack/create-and-connect-to-the-VM/images/index.html new file mode 100644 index 00000000..8bef383b --- /dev/null +++ b/openstack/create-and-connect-to-the-VM/images/index.html @@ -0,0 +1,4610 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Images

+

An image is composed of a virtual collection of a kernel, operating system, and configuration.

+

Glance

+

Glance is the API-driven OpenStack image service that provides services and associated +libraries to store, browse, register, distribute, and retrieve bootable disk images. +It acts as a registry for virtual machine images, allowing users to copy server +images for immediate storage. These images can be used as templates when setting +up new instances.

+

NERC Images List

+

Once you're logged in to NERC's Horizon dashboard:

+

Navigate to Project -> Compute -> Images.

+

NERC provides a set of default images that can be used as source while launching +an instance:

| ID | Name |
| --- | --- |
| a9b48e65-0cf9-413a-8215-81439cd63966 | MS-Windows-2022 |
| cfecb5d4-599c-4ffd-9baf-9cbe35424f97 | almalinux-8-x86_64 |
| 263f045e-86c6-4344-b2de-aa475dbfa910 | almalinux-9-x86_64 |
| 41fa5991-89d5-45ae-8268-b22224c772b2 | debian-10-x86_64 |
| 99194159-fcd1-4281-b3e1-15956c275692 | fedora-36-x86_64 |
| 74a33f77-fc42-4dd1-a5a2-55fb18fc50cc | rocky-8-x86_64 |
| d7d41e5f-58f4-4ba6-9280-7fef9ac49060 | rocky-9-x86_64 |
| 75a40234-702b-4ab7-9d83-f436b05827c9 | ubuntu-18.04-x86_64 |
| 8c87cf6f-32f9-4a4b-91a5-0d734b7c9770 | ubuntu-20.04-x86_64 |
| da314c41-19bf-486a-b8da-39ca51fd17de | ubuntu-22.04-x86_64 |
| 17912292-8861-489a-b37e-bb78e15b934a | ubuntu-24.04-x86_64 |
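The same list is also available from the CLI, and you can inspect any single image by name or ID (a minimal sketch; requires the OpenStack CLI to be configured for your project):

openstack image list
openstack image show ubuntu-22.04-x86_64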
+

How to create and upload your own custom images?

+

Besides the system-provided images mentioned above, users can customize and upload their own images to NERC, as documented in this documentation.
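As a rough sketch of the upload step itself (the file name here is a placeholder; see the linked documentation for the full, supported workflow):

openstack image create --disk-format qcow2 --container-format bare --file my-custom-image.qcow2 my-custom-image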

+

Please refer to this guide +to learn more about how to obtain other publicly available virtual machine images +for the NERC OpenStack platform within your project space.

+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/create-and-connect-to-the-VM/images/instance-boot-source-options.png b/openstack/create-and-connect-to-the-VM/images/instance-boot-source-options.png new file mode 100644 index 00000000..3bd0b6d1 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/instance-boot-source-options.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/instance-persistent-storage-option.png b/openstack/create-and-connect-to-the-VM/images/instance-persistent-storage-option.png new file mode 100644 index 00000000..1daf87bd Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/instance-persistent-storage-option.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/instance_configuration.png b/openstack/create-and-connect-to-the-VM/images/instance_configuration.png new file mode 100644 index 00000000..3f88b2f6 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/instance_configuration.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/launch_a_vm.png b/openstack/create-and-connect-to-the-VM/images/launch_a_vm.png new file mode 100644 index 00000000..26f14dca Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/launch_a_vm.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/launch_flavor.png b/openstack/create-and-connect-to-the-VM/images/launch_flavor.png new file mode 100644 index 00000000..048a4847 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/launch_flavor.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/launch_instance_from_volume.png b/openstack/create-and-connect-to-the-VM/images/launch_instance_from_volume.png new file mode 100644 index 00000000..b4497ece Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/launch_instance_from_volume.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/launch_networks.png b/openstack/create-and-connect-to-the-VM/images/launch_networks.png new file mode 100644 index 00000000..35e0529c Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/launch_networks.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/launch_security_groups.png b/openstack/create-and-connect-to-the-VM/images/launch_security_groups.png new file mode 100644 index 00000000..09994eb6 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/launch_security_groups.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/launch_security_key_pairs.png b/openstack/create-and-connect-to-the-VM/images/launch_security_key_pairs.png new file mode 100644 index 00000000..df3e0ee7 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/launch_security_key_pairs.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/launch_source.png b/openstack/create-and-connect-to-the-VM/images/launch_source.png new file mode 100644 index 00000000..ae4b5f35 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/launch_source.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/persistent_volume.png b/openstack/create-and-connect-to-the-VM/images/persistent_volume.png new file mode 100644 index 00000000..8a2e7c4c Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/persistent_volume.png differ diff --git 
a/openstack/create-and-connect-to-the-VM/images/prompted_administrator_login.png b/openstack/create-and-connect-to-the-VM/images/prompted_administrator_login.png new file mode 100644 index 00000000..dc7da852 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/prompted_administrator_login.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/prompted_rdp_connection.png b/openstack/create-and-connect-to-the-VM/images/prompted_rdp_connection.png new file mode 100644 index 00000000..a5b69f5d Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/prompted_rdp_connection.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/rdp_popup_for_xrdp.png b/openstack/create-and-connect-to-the-VM/images/rdp_popup_for_xrdp.png new file mode 100644 index 00000000..90c9ec24 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/rdp_popup_for_xrdp.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/rdp_windows_for_xrdp.png b/openstack/create-and-connect-to-the-VM/images/rdp_windows_for_xrdp.png new file mode 100644 index 00000000..400a74ee Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/rdp_windows_for_xrdp.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/remote_connected_instance.png b/openstack/create-and-connect-to-the-VM/images/remote_connected_instance.png new file mode 100644 index 00000000..e79ca6d9 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/remote_connected_instance.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/remote_connection_floating_ip.png b/openstack/create-and-connect-to-the-VM/images/remote_connection_floating_ip.png new file mode 100644 index 00000000..2e11ce9b Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/remote_connection_floating_ip.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/resize_instance_confirm.png b/openstack/create-and-connect-to-the-VM/images/resize_instance_confirm.png new file mode 100644 index 00000000..89e8fa3e Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/resize_instance_confirm.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/resize_instance_dialog.png b/openstack/create-and-connect-to-the-VM/images/resize_instance_dialog.png new file mode 100644 index 00000000..e27ee7bf Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/resize_instance_dialog.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/resize_instance_flavor.png b/openstack/create-and-connect-to-the-VM/images/resize_instance_flavor.png new file mode 100644 index 00000000..392f1e03 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/resize_instance_flavor.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/running_instance.png b/openstack/create-and-connect-to-the-VM/images/running_instance.png new file mode 100644 index 00000000..df8eba94 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/running_instance.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/security_group_for_rdp.png b/openstack/create-and-connect-to-the-VM/images/security_group_for_rdp.png new file mode 100644 index 00000000..18ec5878 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/security_group_for_rdp.png differ diff --git 
a/openstack/create-and-connect-to-the-VM/images/set_windows_administrator_password.png b/openstack/create-and-connect-to-the-VM/images/set_windows_administrator_password.png new file mode 100644 index 00000000..070ae39a Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/set_windows_administrator_password.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/show_options_rdp_windows.png b/openstack/create-and-connect-to-the-VM/images/show_options_rdp_windows.png new file mode 100644 index 00000000..b32d38cb Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/show_options_rdp_windows.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/ssh_to_vm.png b/openstack/create-and-connect-to-the-VM/images/ssh_to_vm.png new file mode 100644 index 00000000..9463340f Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/ssh_to_vm.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/stack_image_to_volume.png b/openstack/create-and-connect-to-the-VM/images/stack_image_to_volume.png new file mode 100644 index 00000000..1cfb3ba7 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/stack_image_to_volume.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/stack_images_windows.png b/openstack/create-and-connect-to-the-VM/images/stack_images_windows.png new file mode 100644 index 00000000..cc94e00f Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/stack_images_windows.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/vm_images.png b/openstack/create-and-connect-to-the-VM/images/vm_images.png new file mode 100644 index 00000000..c318fe8b Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/vm_images.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/vm_launch_details.png b/openstack/create-and-connect-to-the-VM/images/vm_launch_details.png new file mode 100644 index 00000000..e1a453eb Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/vm_launch_details.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/win2k22_instance_running.png b/openstack/create-and-connect-to-the-VM/images/win2k22_instance_running.png new file mode 100644 index 00000000..2660a499 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/win2k22_instance_running.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/win_instance_add_floating_ip.png b/openstack/create-and-connect-to-the-VM/images/win_instance_add_floating_ip.png new file mode 100644 index 00000000..3ca90b3d Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/win_instance_add_floating_ip.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/xrdp_desktop.png b/openstack/create-and-connect-to-the-VM/images/xrdp_desktop.png new file mode 100644 index 00000000..fccfee91 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/xrdp_desktop.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/xrdp_display_manager.png b/openstack/create-and-connect-to-the-VM/images/xrdp_display_manager.png new file mode 100644 index 00000000..b9fab5a5 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/xrdp_display_manager.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/xrdp_login.png b/openstack/create-and-connect-to-the-VM/images/xrdp_login.png new file mode 100644 
index 00000000..14170285 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/xrdp_login.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/xrdp_macos_add_pc.png b/openstack/create-and-connect-to-the-VM/images/xrdp_macos_add_pc.png new file mode 100644 index 00000000..a29f4e2c Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/xrdp_macos_add_pc.png differ diff --git a/openstack/create-and-connect-to-the-VM/images/xrdp_macos_add_user_account.png b/openstack/create-and-connect-to-the-VM/images/xrdp_macos_add_user_account.png new file mode 100644 index 00000000..d704d602 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/images/xrdp_macos_add_user_account.png differ diff --git a/openstack/create-and-connect-to-the-VM/launch-a-VM/index.html b/openstack/create-and-connect-to-the-VM/launch-a-VM/index.html new file mode 100644 index 00000000..ef9cc89c --- /dev/null +++ b/openstack/create-and-connect-to-the-VM/launch-a-VM/index.html @@ -0,0 +1,4821 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

How to launch an Instance

+

Prerequisites:

+
    +
  • +

    You followed the instruction in Create a Key Pair + to set up a public ssh key.

    +
  • +
  • +

    Make sure you have added rules in the + Security Groups to + allow ssh using Port 22 access to the instance.

    +
  • +
+

Using Horizon dashboard

+

Once you're logged in to NERC's Horizon dashboard.

+

Navigate: Project -> Compute -> Instances.

+

Click on "Launch Instance" button:

+

VM Launch Instance

+

In the Launch Instance dialog box, specify the following values:

+

Details Tab

+

Instance Name: Give your instance a descriptive name; this is the name assigned to the virtual machine.

+
+

Important Note

+

The instance name you assign here becomes the initial host name of the server. +If the name is longer than 63 characters, the Compute service truncates it +automatically to ensure dnsmasq works correctly.

+
+

Availability Zone: By default, this value is set to the availability zone given +by the cloud provider i.e. nova.

+

Count: To launch multiple instances, enter a value greater than 1. The default +is 1.

+

VM Launch Instance Detail

+

Source Tab

+

Double-check the selection in the "Select Boot Source" dropdown.

+

When you start a new instance, you can choose the Instance Boot Source from the +following list:

+
    +
  • +

    boot from image

    +
  • +
  • +

    boot from instance snapshot

    +
  • +
  • +

    boot from volume

    +
  • +
  • +

    boot from volume snapshot

    +
  • +
+

In its default configuration, when the instance is launched from an Image or +an Instance Snapshot, the choice for utilizing persistent storage is configured +by selecting the Yes option for "Create New Volume". Additionally, the "Delete +Volume on Instance Delete" setting is pre-set to No, as indicated here:

+

Launching an Instance Boot Source

+

If you set the "Create New Volume" option to No, the instance will boot +from either an image or a snapshot, with the instance only being attached to an +ephemeral disk as described here. +To mitigate potential data loss, we strongly recommend regularly taking a snapshot +of such a running ephemeral instance, referred to as an "instance snapshot", +especially if you want to safeguard or recover important states of your instance.

+

When deploying a non-ephemeral instance, which involves creating a new volume and +selecting Yes for "Delete Volume on Instance Delete", deleting the instance +will also remove the associated volume. Consequently, all data on that disk is +permanently lost, which is undesirable when the data on attached volumes needs +to persist even after the instance is deleted. Ideally, selecting "Yes" for this +setting should be reserved for instances where persistent data storage is not required.

+
+

Very Important: How do you make your VM setup and data persistent?

+

For more in-depth information on making your VM setup and data persistent, +you can explore the details here.

+
+

To start a VM for the first time, we will need a base image, so please make sure the "Image" option is selected in the dropdown. In this example, we chose ubuntu-22.04-x86_64; you may choose any available image.

+
+

Bootable Images

+

NERC has made several Public bootable images available to the users as +listed here. Customers can also upload their own custom images, +as documented in this guide.

+

To view them, Navigate: Project -> Compute -> Images.

+

VM Images

+
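If you have the OpenStack CLI installed and configured with your NERC credentials (an optional alternative to the dashboard), the same public images can also be listed from a terminal, for example:

openstack image list --public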
+

VM Launch Instance Source

+
+

How to override the flavor's Default root disk volume size

+

If you don't specify a custom value for "Volume Size (GB)", it will be set to the root disk size of your selected Flavor. For more about the default root disk size you can refer to this documentation. You can override this value by entering your own custom value (in GiB), and that value is available as a Volume attached to the instance to enable persistent storage.

+
+

Flavor Tab

+

Specify the size of the instance to launch. Choose cpu-su.4 from the 'Flavor' +tab by clicking on the "+" icon.

+
+

Important Note

+

In NERC OpenStack, flavors define the compute, memory, and storage +capacity of nova computing instances. In other words, a flavor is an +available hardware configuration for a server.

+

Some flavors will not be available for your use, depending on your resource quota limits, and will be shown as below:

+

Flavor Not Available due to Your Quota

+

NOTE: More details about available flavors can be found here, and how to request changes to the current allocation quota attributes can be found here.

+
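Similarly, if you use the OpenStack CLI (optional, and assuming your credentials are already sourced), you can list the flavors visible to your project with:

openstack flavor list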
+

After choosing cpu-su.4, you should see it moved up to "Allocated".

+

VM Launch Instance Flavor

+
+

Storage and Volume

+
    +
  • +

    System disks are the first disk, sized according to the flavor's disk space, and are generally used to store the operating system copied from the image used to boot the virtual machine.

    +
  • +
  • +

    Volumes are persistent virtualized block devices independent of any particular instance. Volumes may be attached to a single instance at a time, but may be detached or reattached to a different instance while retaining all data, much like a USB drive. The size of the volume can be selected when it is created, within the storage quota limits for the particular resource allocation. (A CLI sketch for creating and attaching a volume follows this list.)

    +
  • +
+
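As referenced in the list above, here is a rough CLI sketch of creating and attaching a persistent volume (this assumes the OpenStack CLI is configured for your project; my-volume and my-first-vm are placeholder names):

# Create a 20 GiB persistent volume (placeholder name: my-volume)
openstack volume create --size 20 my-volume

# Attach it to a running instance (placeholder name: my-first-vm)
openstack server add volume my-first-vm my-volume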
+

Networks Tab

+

Make sure the Default Network that is created by default is moved up to "Allocated". +If not, you can click on the "+" icon in "Available".

+

VM Launch Instance Networks

+

Security Groups Tab

+

Make sure to add the security group where you enabled SSH. To add an SSH +security group first, see here.

+

VM Launch Instance Security Groups

+
+

How to update New Security Group(s) on any running VM?

+

If you want to attach/detach any new Security Group(s) to/from a running VM after it has launched, first create the new Security Group(s) with all the required rules. Following this guide, you'll be able to attach the created security group(s), with all the required rules, to a running VM. You can modify the rules of any Security Group, but that will affect all VMs using that security group.

+
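For reference, the same attach operation can also be done with the OpenStack CLI (a sketch only; my-vm and my-new-secgroup are placeholder names that must match the ones in your project):

openstack server add security group my-vm my-new-secgroup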
+

Key Pair Tab

+

Add the key pair you created for your local machine/laptop to use with this VM. +To add a Key Pair first create and add them to your Project as described here.

+

VM Launch Instance Key Pairs

+
+

Important Note

+

If you did not provide a key pair, security groups, or rules, users can +access the instance only from inside the cloud through VNC. Even pinging the +instance is not possible without an ICMP rule configured. We recommend limiting +access as much as possible for best security practices.

+
+

Ignore other Tabs

+

Network Ports, Configuration, Server Groups, Scheduler Hints, and Metadata tabs: Please ignore these tabs, as they are not important and are only for advanced setup.

+
+

How to use 'Configuration' tab

+

If you want to specify a customization script that runs after your instance launches, you can write that custom script inside the "Customization Script" text area. For example: VM Launch Instance Configuration Script

+
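As a minimal sketch of what such a customization script might look like (the packages chosen here are only illustrative), you could paste something like this into the "Customization Script" text area:

#!/bin/bash
# Runs as root on the first boot of the instance
apt-get update
apt-get install -y git htop
echo "Provisioned on $(date)" > /opt/provisioned.txt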
+

You are now ready to launch your VM - go ahead and click "Launch Instance". This +will initiate an instance.

+

On a successful launch, you will be redirected to the Compute -> Instances tab and can see the VM spawning.

+

Once your VM is successfully running, you will see the Power State change from "No State" to "running".

+

VM Launch Instance Successful

+
+

Note

+

Here we explained how to launch an instance using an Image, but you can also launch an instance from an "instance snapshot", "volume", or "volume snapshot", following steps similar to those above. If you want to use the OpenStack CLI to launch a VM, you can read this, or if you want to provision the NERC resources using Terraform, you can read this.

+
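As a rough illustration of the CLI route (a sketch only; the key pair, security group, and network names below are placeholders that must match the ones in your project, and the OpenStack CLI must be configured with your credentials), launching the VM configured above might look like:

openstack server create \
  --image ubuntu-22.04-x86_64 \
  --flavor cpu-su.4 \
  --key-name my_cloud_key \
  --security-group ssh_only \
  --network default_network \
  my-first-vm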
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/create-and-connect-to-the-VM/ssh-to-the-VM/index.html b/openstack/create-and-connect-to-the-VM/ssh-to-the-VM/index.html new file mode 100644 index 00000000..93aad08d --- /dev/null +++ b/openstack/create-and-connect-to-the-VM/ssh-to-the-VM/index.html @@ -0,0 +1,5054 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

SSH to the VM

+

Secure Shell (SSH) is used to administer and manage Linux workloads. Before trying to access instances from the outside world, you need to make sure you have followed these steps:

+
    +
  • +

    You followed the instruction in Create a Key Pair + to set up a public ssh key.

    +
  • +
  • +

    Your public ssh-key was selected (in the "Key Pair" tab) while launching the instance.

    +
  • +
  • +

    Assign a Floating IP to the instance in order to access it from the outside world.

    +
  • +
  • +

    Make sure you have added rules in the + Security Groups to + allow ssh using Port 22 access to the instance.

    +
  • +
+
+

How to update New Security Group(s) on any running VM?

+

If you want to attach/deattach any new Security Group(s) to/from a running VM +after it has launched. First create all new Security Group(s) with all the rules +required. Following this guide, +you'll be able to attach created security group(s) with all the +required rules to a running VM.

+
+

Make a note of the Floating IP you have associated to your instance.

+

Associated Instance Floating IP

+

In our example, the IP is 199.94.60.66.

+

Default usernames for all the base images are:

+
    +
  • +

    all Ubuntu images: ubuntu

    +
  • +
  • +

    all AlmaLinux images: almalinux

    +
  • +
  • +

    all Rocky Linux images: rocky

    +
  • +
  • +

    all Fedora images: fedora

    +
  • +
  • +

    all Debian images: debian

    +
  • +
  • +

    all RHEL images: cloud-user

    +
  • +
+
+

Removed Centos Images

+

If you still have VMs running with deleted CentOS images, you need to use +the following default username for your CentOS images: centos.

+
    +
  • all CentOS images: centos
  • +
+
+

Our example VM was launched with the ubuntu-22.04-x86_64 base image, so the user we need is 'ubuntu'.

+

Open a Terminal window and type:

+
ssh ubuntu@199.94.60.66
+
+

Since you have never connected to this VM before, you will be asked if you are +sure you want to connect. Type yes.

+

SSH To VM Successful

+
+

Important Note

+

If you haven't added your key to ssh-agent, you may need to specify the +private key file, like this: ssh -i ~/.ssh/cloud.key ubuntu@199.94.60.66

+

To add your private key to the ssh-agent, follow these steps:

+
    +
  1. +

    eval "$(ssh-agent -s)"

    +

    Output: Agent pid 59566

    +
  2. +
  3. +

    ssh-add ~/.ssh/cloud.key

    +

    If your private key is password protected, you'll be prompted to enter the +passphrase.

    +
  4. +
  5. +

    Verify that the key has been added by running ssh-add -l.

    +
  6. +
+
+

SSH to the VM using SSH Config

+

Alternatively, you can configure the settings for remote instances in your SSH configuration file (typically found in ~/.ssh/config). The SSH configuration file might include an entry for your newly launched VM like this:

+
Host ExampleHostLabel
+    HostName 199.94.60.66
+    User ubuntu
+    IdentityFile ~/.ssh/cloud.key
+
+

Here, the Host value can be any label you want. The HostName value is the Floating IP you have associated with the instance you want to access, the User value specifies the default account username based on the base OS image used for the VM, and IdentityFile specifies the path to your private key on your local machine. With this configuration defined, you can connect to the account by simply using the Host value, "ExampleHostLabel". You do not have to type the username, hostname, and private key each time.

+

So, you can SSH into your host VM by running:

+
ssh ExampleHostLabel
+
+
+

Setting a password

+

When the VMs are launched, a strong, randomly-generated password is created for +the default user, and then discarded.

+

Once you connect to your VM, you will want to set a password in case you ever +need to log in via the console in the web dashboard.

+

For example, if your network connections aren't working correctly.

+
+

Setting a password is necessary to use Remote Desktop Protocol (RDP)

+

Remote Desktop Protocol (RDP) is widely used for Windows remote connections, but you can also access and interact with the graphical user interface of a remote Linux server by using a tool like xrdp, an open-source implementation of the RDP server. You can use xrdp to remotely access the Linux desktop. To do so, you need to use an RDP client. Moreover, xrdp delivers a login to the remote machine using Microsoft RDP. This is why a user with a password is necessary to access the VM. You can refer to this guide on how to install and configure an RDP server using xrdp on an Ubuntu server and access it using an RDP client from your local machine.

+
+

Since you are not using it to log in over SSH or to sudo, it doesn't really +matter how hard it is to type, and we recommend using a randomly-generated +password.

+

Create a random password like this:

+
ubuntu@test-vm:~$ cat /dev/urandom | base64 | dd count=14 bs=1
+T1W16HCyfZf8V514+0 records in
+14+0 records out
+14 bytes copied, 0.00110367 s, 12.7 kB/s
+
+

The 'count' parameter controls the number of characters.

+

The first [count] characters of the output are your randomly generated output, +followed immediately by "[count]+0", +so in the above example the password is: T1W16HCyfZf8V5.

+
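Alternatively, if you prefer a command whose output is just the password itself, openssl (preinstalled on most base images) can generate one, for example:

ubuntu@test-vm:~$ openssl rand -base64 12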

Set the password for ubuntu using the command:

+
ubuntu@test-vm:~$ sudo passwd ubuntu
+New password:
+Retype new password:
+... password updated successfully
+
+

Store the password in a secure place. Don't send it over email, post it on your +wall on a sticky note, etc.

+

Adding other people's SSH keys to the instance

+

You were able to log in using your own SSH key.

+

Right now OpenStack only permits one key to be added at launch, so you need to add your teammates' keys manually.

+

Get your teammates' public keys. If they used ssh-keygen to create their key, the public key will be in a file with a .pub extension on their machine.

+

If they created a key via the dashboard, or imported the key created with +ssh-keygen, their public key is viewable from the Key Pairs tab.

+

Click on the key pair name. The public key starts with 'ssh-rsa' and looks +something like this:

+
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDL6O5qNZHfgFwf4vnnib2XBub7ZU6khy6z6JQl3XRJg6I6gZ
++Ss6tNjz0Xgax5My0bizORcka/TJ33S36XZfzUKGsZqyEl/ax1Xnl3MfE/rgq415wKljg4
++QvDznF0OFqXjDIgL938N8G4mq/
+cKKtRSMdksAvNsAreO0W7GZi24G1giap4yuG4XghAXcYxDnOSzpyP2HgqgjsPdQue919IYvgH8shr
++sPa48uC5sGU5PkTb0Pk/ef1Y5pLBQZYchyMakQvxjj7hHZaT/
+Lw0wIvGpPQay84plkjR2IDNb51tiEy5x163YDtrrP7RM2LJwXm+1vI8MzYmFRrXiqUyznd
+test_user@demo
+
+

Create a file called something like 'teammates.txt' and paste in your team's +public keys, one per line.

+

Hang onto this file to save yourself from having to do all the copy/pasting +every time you launch a new VM.

+

Copy the file to the vm:

+
[you@your-laptop ~]$ scp teammates.txt ubuntu@199.94.60.66:~
+
+

If the copy works, you will see the output:

+
teammates.txt                  100%    0     0KB/s   00:00
+
+

Append the file's contents to authorized_keys:

+
[cloud-user@test-vm ~] #cat teammates.txt >> ~/.ssh/authorized_keys
+
+

Now your teammates should also be able to log in.

+
+

Important Note

+

Make sure to use >> instead of > to avoid overwriting your own key.

+
+
+

Adding users to the instance

+

You may decide that each teammate should have their own user on the VM instead +of everyone logging in to the default user.

+

Once you log into the VM, you can create another user like this.

+
+

Note

+

The 'sudo_group' differs between operating systems: in CentOS and Red Hat, the group is called 'wheel', while in Ubuntu, the group is called 'sudo'.

+
sudo su
+# The following commands are run as root:
+useradd -m <username>
+passwd <username>
+usermod -aG <sudo_group> <username>   # skip this step for users who should not have root access
+su <username>
+# The remaining commands are run as the new user:
+cd ~
+mkdir .ssh
+chmod 700 .ssh
+cd .ssh
+vi authorized_keys   # paste the public key for that user in this file
+chmod 600 authorized_keys
+
+
+

How To Enable Remote Desktop Protocol Using xrdp on Ubuntu

+

Log in to the server with Sudo access

+

In order to install xrdp, you need to log in to the server with sudo access.

+
ssh username@your_server_ip
+
+

For example:

+
ssh ubuntu@199.94.60.66
+
+

Installing a Desktop Environment

+

After connecting to your server using SSH, update the list of available packages using the following command:

+
sudo apt update
+
+

Next, install the xfce and xfce-goodies packages on your server:

+
sudo apt install xfce4 xfce4-goodies -y
+
+
+

Select Display Manager

+

If prompted to choose a display manager, which manages graphical login mechanisms +and user sessions, you can select any option from the list of available display +managers. For instance, here we have gdm3 as the default selection.

+

xrdp Display Manager

+
+

Installing xrdp

+

To install xrdp, run the following command in the terminal:

+
sudo apt install xrdp -y
+
+

After installing xrdp, verify the status of xrdp using systemctl:

+
sudo systemctl status xrdp
+
+

This command will show the status as active (running):

+

Output:

+
● xrdp.service - xrdp daemon
+    Loaded: loaded (/lib/systemd/system/xrdp.service; enabled; vendor preset: enab>
+    Active: active (running) since Mon 2024-02-12 21:33:01 UTC; 9s ago
+    ...
+    CGroup: /system.slice/xrdp.service
+            └─8839 /usr/sbin/xrdp
+
+
+

What if xrdp is not Running?

+

If the status of xrdp is not running, you may have to start the service manually +with this command: sudo systemctl start xrdp. After executing the above command, +verify the status again to ensure xrdp is in a running state.

+
+

Make xrdp use the desktop environment we previously installed:

+
sudo sed -i.bak '/fi/a #xrdp multiple users configuration \n xfce-session \n' /etc/xrdp/startwm.sh
+
+

Configuring xrdp and Updating Security Groups

+

If you want to customize the default xrdp configuration (optional), review the default configuration of xrdp, which is stored under /etc/xrdp/xrdp.ini. xrdp.ini is the default configuration file for setting up RDP connections to the xrdp server, and it can be modified and customized to meet your RDP connection requirements.

+

Add a new security group with an RDP (port 3389) rule open to the public for RDP connections and attach that security group to your instance as described here.

+
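If you prefer the OpenStack CLI for this step (a sketch only; "rdp" is a placeholder group name, and your credentials must already be sourced), the security group and rule could be created like this, and then attached to your instance as described above:

# Create a security group for RDP (placeholder name: rdp)
openstack security group create rdp

# Allow inbound RDP (TCP port 3389) from anywhere
openstack security group rule create --protocol tcp --dst-port 3389 --remote-ip 0.0.0.0/0 rdp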
+

How to Update Security Group(s) on a Running VM?

+

Following this guide, +you'll be able to attach created security group(s) with all the +required rules to a running VM.

+
+

Restart the xrdp server to make sure all the above changes are reflected:

+
sudo systemctl restart xrdp
+
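Optionally, before testing the connection, you can confirm that xrdp is now listening on its default port 3389:

sudo ss -tulpn | grep 3389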
+

Testing the RDP Connection

+

You should now be able to connect to the Ubuntu VM via xrdp.

+

Testing the RDP Connection on Windows

+

If you are using Windows as your local desktop, an RDP connection application is available by default on your machine.

+

Enter your VM's Floating IP and username into the fillable text boxes for Computer +and User name.

+

RDP Windows

+

You may need to press the down arrow for "Show Options" to input the username i.e. +ubuntu:

+

Show Options To Enter Username

+

Press the Connect button. If you receive an alert that the "Remote Desktop can't +connect to the remote computer", check that you have properly attached the security +group with a RDP (port 3389) rule open to the public to your VM as described here.

+

Press Yes if you receive the identity verification popup:

+

RDP Windows Popup

+

Then, enter your VM's username (ubuntu) and the password you created for user ubuntu following these steps.

+

Press Ok.

+

xrdp Login Popup

+

Once you have logged in, you should be able to access your Ubuntu Desktop environment:

+

xrdp Desktop

+

Testing the RDP Connection on macOS

+

To test the connection using the Remote Desktop Connection client on macOS, first +launch the Microsoft Remote Desktop Connection app.

+

Press Add PC, then enter your remote server's Floating IP in the PC name +fillable box:

+

xrdp Add PC

+

You can Add a user account when setting up the connection:

+

xrdp Add User Account

+

Once you have logged in, you can access your Ubuntu remote desktop. You can close +it with the exit button.

+

Testing the RDP Connection on Linux

+

If you are using Linux as your local desktop, you can connect to the server via Remmina.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/access_popup.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/access_popup.png new file mode 100644 index 00000000..5573a3b1 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/access_popup.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/add_a_config.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/add_a_config.png new file mode 100644 index 00000000..c11838a6 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/add_a_config.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/available_instances.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/available_instances.png new file mode 100644 index 00000000..8c68e7d2 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/available_instances.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/client_config_file.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/client_config_file.png new file mode 100644 index 00000000..9a8ed8f0 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/client_config_file.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/client_config_installed_successfully.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/client_config_installed_successfully.png new file mode 100644 index 00000000..21c3dac6 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/client_config_installed_successfully.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/configuration.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/configuration.png new file mode 100644 index 00000000..025f5427 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/configuration.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/configuration_file_options.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/configuration_file_options.png new file mode 100644 index 00000000..667a12f1 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/configuration_file_options.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/configuration_mac.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/configuration_mac.png new file mode 100644 index 00000000..7c913c64 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/configuration_mac.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/connect_menu.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/connect_menu.png new file mode 100644 index 00000000..565b13e6 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/connect_menu.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/connect_vpn.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/connect_vpn.png new file mode 100644 index 00000000..d4e95663 Binary files /dev/null and 
b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/connect_vpn.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/connected_notification.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/connected_notification.png new file mode 100644 index 00000000..b4c4fbbb Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/connected_notification.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/connection_successful.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/connection_successful.png new file mode 100644 index 00000000..95ded717 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/connection_successful.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/disconnect_using_tunnelblick_icon.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/disconnect_using_tunnelblick_icon.png new file mode 100644 index 00000000..3db5741f Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/disconnect_using_tunnelblick_icon.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/disconnect_vpn.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/disconnect_vpn.png new file mode 100644 index 00000000..910825c1 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/disconnect_vpn.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/dmg_installer.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/dmg_installer.png new file mode 100644 index 00000000..dd1402db Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/dmg_installer.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/file_imported_successful.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/file_imported_successful.png new file mode 100644 index 00000000..58041360 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/file_imported_successful.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/generate_client_nerc.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/generate_client_nerc.png new file mode 100644 index 00000000..103f3fb3 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/generate_client_nerc.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/import_config_file.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/import_config_file.png new file mode 100644 index 00000000..84287231 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/import_config_file.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/installation_complete.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/installation_complete.png new file mode 100644 index 00000000..dac6c6f4 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/installation_complete.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/installation_path_customization.png 
b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/installation_path_customization.png new file mode 100644 index 00000000..32ddf361 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/installation_path_customization.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/installation_setting.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/installation_setting.png new file mode 100644 index 00000000..cc6db051 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/installation_setting.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/logs.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/logs.png new file mode 100644 index 00000000..26ad4294 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/logs.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/no_config_alert.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/no_config_alert.png new file mode 100644 index 00000000..14145660 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/no_config_alert.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/notification_settings.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/notification_settings.png new file mode 100644 index 00000000..69f78fa2 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/notification_settings.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/openvpn_security_rule.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/openvpn_security_rule.png new file mode 100644 index 00000000..707ff6af Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/openvpn_security_rule.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/popup_open.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/popup_open.png new file mode 100644 index 00000000..9c722aa8 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/popup_open.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/preview_connection_log.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/preview_connection_log.png new file mode 100644 index 00000000..5ece5c7c Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/preview_connection_log.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/private_instance_accessible.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/private_instance_accessible.png new file mode 100644 index 00000000..e94379e6 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/private_instance_accessible.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/second_client_generate.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/second_client_generate.png new file mode 100644 index 00000000..9a475b7d Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/second_client_generate.png differ diff --git 
a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/security_groups.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/security_groups.png new file mode 100644 index 00000000..833c5a5d Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/security_groups.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/setup_client_completed.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/setup_client_completed.png new file mode 100644 index 00000000..f56b8c69 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/setup_client_completed.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/ssh_server.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/ssh_server.png new file mode 100644 index 00000000..d33f80e1 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/ssh_server.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/ssh_vpn_server.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/ssh_vpn_server.png new file mode 100644 index 00000000..e94379e6 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/ssh_vpn_server.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/start_openvpn_using_config_file.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/start_openvpn_using_config_file.png new file mode 100644 index 00000000..39a74669 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/start_openvpn_using_config_file.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnel_successful.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnel_successful.png new file mode 100644 index 00000000..b79a1f6e Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnel_successful.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnelblick_app_icon.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnelblick_app_icon.png new file mode 100644 index 00000000..40313d43 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnelblick_app_icon.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnelblick_configuration_interface.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnelblick_configuration_interface.png new file mode 100644 index 00000000..7c913c64 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnelblick_configuration_interface.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnelblick_download.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnelblick_download.png new file mode 100644 index 00000000..e18855ae Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnelblick_download.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnelblick_icon.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnelblick_icon.png new file mode 100644 index 00000000..40313d43 Binary files /dev/null and 
b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnelblick_icon.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnelblick_interface.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnelblick_interface.png new file mode 100644 index 00000000..cda0f1ee Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/tunnelblick_interface.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/user_authentication.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/user_authentication.png new file mode 100644 index 00000000..4eea62e3 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/user_authentication.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/user_to_authorize.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/user_to_authorize.png new file mode 100644 index 00000000..a1081ec5 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/user_to_authorize.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/vpn_details_menu.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/vpn_details_menu.png new file mode 100644 index 00000000..e9184aac Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/vpn_details_menu.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/windows_installer.png b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/windows_installer.png new file mode 100644 index 00000000..ac8ee1c9 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/images/windows_installer.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/index.html b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/index.html new file mode 100644 index 00000000..21fab30e --- /dev/null +++ b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/index.html @@ -0,0 +1,4724 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

OpenVPN

+

OpenVPN is a full-featured SSL VPN which implements OSI layer 2 or 3 secure +network extension using the industry standard SSL/TLS protocol, supports +flexible client authentication methods based on certificates, smart cards, and/ +or username/password credentials, and allows user or group-specific access +control policies using firewall rules applied to the VPN virtual interface.

+

OpenVPN offers a scalable client/server mode, allowing multiple clients to +connect to a single OpenVPN server process over a single TCP or UDP port.

+

Installing OpenVPN Server

+

You can read the official documentation here.

+

You can spin up a new instance named "openvpn_server" on OpenStack using the "ubuntu-22.04-x86_64" image (or any available Ubuntu OS image), with the "default" and "ssh_only" Security Groups attached to it.

+

Available instances

+

Also, attach a Floating IP to this instance so you can ssh into it from outside.

+

Create a new Security Group, e.g. "openvpn", that allows inbound traffic on UDP port 1194, as shown below:

+

OpenVPN Security Rule

+

The Security Groups attached to the OpenVPN server include "default", "ssh_only", and "openvpn". They should look similar to the image shown below:

+

Security Groups

+

Finally, you'll want to configure the settings for the remote instance in your SSH configuration file (typically found in ~/.ssh/config). The SSH configuration file might include an entry for your newly created OpenVPN server like this:

+
Host openvpn
+  HostName 199.94.60.66
+  User ubuntu
+  IdentityFile ~/.ssh/cloud.key
+
+
    +
  1. +

    Then you can ssh into the OpenVPN Server running: ssh openvpn

    +

    SSH OpenVPN server

    +
  2. +
  3. +

    Also note that OpenVPN must be installed and run by a user who has + administrative/root privileges. So, we need to run the command: sudo su

    +
  4. +
  5. +

    We are using this repo to install the OpenVPN server on this Ubuntu server.

    +

    For that, run the script and follow the assistant:

    +
    wget https://git.io/vpn -O openvpn-install.sh && bash openvpn-install.sh
    +
    +

    Generating first client

    +

    You can press Enter for all default values. While entering a name for the first client, you can give "nerc" as the client name; this will generate a new configuration file (.ovpn file) named "nerc.ovpn". Based on your client's name, the config file will be named "<client_name>.ovpn".

    +

    Setup Client completed

    +
  6. +
  7. +

    Copy the generated config file from "/root/nerc.ovpn" to "/home/ubuntu/nerc.ovpn" by running: cp /root/nerc.ovpn .

    +
  8. +
  9. +

    Update the ownership of the config file to ubuntu user and ubuntu group by + running the following command: chown ubuntu:ubuntu nerc.ovpn

    +
  10. +
  11. +

    You can exit from the root shell and the ssh session altogether, and then copy the configuration file to your local machine by running the following command in your local machine's terminal: scp openvpn:nerc.ovpn .

    +
  12. +
+

To add a new client user

+

Once the script finishes, you can run it again to add more users, remove some of them, or even completely uninstall OpenVPN.

+

For this, run the script and follow the assistant:

+
wget https://git.io/vpn -O openvpn-install.sh && bash openvpn-install.sh
+
+

Second Client Generate

+

Here, you are giving the client name as "mac_client", which will generate a new configuration file at "/root/mac_client.ovpn". You can repeat steps 4 to 6 above to copy this new client's configuration file and share it with the new client.

+
+
+

Important Note

+

You need to contact your project administrator to get your own OpenVPN configuration file (a file with a .ovpn extension). Download it and keep it on your local machine so that you can use this client configuration profile in the next steps.

+
+

An OpenVPN client or compatible software is needed to connect to the OpenVPN server. Please install one of the clients below, depending on your device. The client program must be configured with a client profile to connect to the OpenVPN server.

+

Windows

+

OpenVPN source code and Windows installers can be downloaded here. The OpenVPN executable should be installed +on both server and client machines since the single executable provides both +client and server functions. Please see the OpenVPN client setup guide for +Windows.

+

Mac OS X

+

The client we recommend and support for Mac OS is Tunnelblick. To install +Tunnelblick, download the dmg installer file from the Tunnelblick site, mount the dmg, and drag the Tunnelblick +application to Applications. Please refer to +this guide for more information.

+

Linux

+

OpenVPN is available through the package management system on most Linux distributions.

+

On Debian/Ubuntu:

+
sudo apt-get install openvpn
+
+

On RedHat/Rocky/AlmaLinux:

+
sudo dnf install openvpn
+
+

Then, to run OpenVPN using the client profile:

+

Move the VPN client profile (configuration) file to /etc/openvpn/ :

+
sudo mv nerc.ovpn /etc/openvpn/client.conf
+
+

Start the OpenVPN daemon (this will start the OpenVPN connection, and it will automatically run on boot):

+
sudo /etc/init.d/openvpn start
+
+

OR,

+
sudo systemctl enable --now openvpn@client
+sudo systemctl start openvpn@client
+
+

Checking the status:

+
systemctl status openvpn@client
+
+

Alternatively, if you want to run OpenVPN manually each time, then run:

+
sudo openvpn --config /etc/openvpn/client.conf
+
+

OR,

+
sudo openvpn --config nerc.ovpn
+
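Once the client is connected (whichever of the above methods you used), you can verify that the VPN tunnel interface (typically named tun0) is up and that routes were added, for example:

ip addr show tun0
ip route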
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/openvpn_gui_for_windows/index.html b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/openvpn_gui_for_windows/index.html new file mode 100644 index 00000000..3c85feb6 --- /dev/null +++ b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/openvpn_gui_for_windows/index.html @@ -0,0 +1,4664 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

OpenVPN-GUI

+

Official OpenVPN Windows installers +include a Windows +OpenVPN-GUI, which +allows managing OpenVPN connections from a system tray applet.

+

Find your client account credentials

+

You need to contact your project administrator to get your own OpenVPN configuration file (a file with a .ovpn extension). Download it and keep it on your local machine so that you can use this client configuration profile in the next steps.

+

Download and install OpenVPN-GUI

+
    +
  1. +

    Download the OpenVPN client installer:

    +

    OpenVPN for Windows can be installed from the self-installing exe file on the OpenVPN download page. Also note that OpenVPN must be installed and run by a user who has administrative privileges (this restriction is imposed by Windows, not OpenVPN).

    +
  2. +
  3. +

    Launch the installer and follow the prompts as directed.

    +

    Windows Installer

    +
  4. +
  5. +

    Clicking "Customize" button we can see settings and features of OpenVPN + GUI client.

    +

    Installation Customization

    +
  6. +
  7. +

    Click "Install Now" to continue.

    +

    Installation Complete

    +
  8. +
  9. +

    Click "Close"button.

    +
  10. +
  11. +

    Since the newly installed OpenVPN GUI has no client configuration profile yet, it will show a pop-up alert:

    +

    No Config Alert

    +
  12. +
+

Set up the VPN with OpenVPN GUI

+

After you've run the Windows installer, OpenVPN is ready for use and will +associate itself with files having the .ovpn extension.

+
    +
  1. +

    You can use the previously downloaded .ovpn file from your Downloads folder + to setup the connection profiles.

    +

    a. Either you can Right click on the OpenVPN configuration file (.ovpn) and +select "Start OpenVPN on this config file":

    +

    Start OpenVPN on selected config file

    +

    b. OR, you can use "Import file…" menu to select the previously +downloaded .ovpn file.

    +

    Import file from taskbar app

    +

    Once, done it will show:

    +

    File Imported Successful Alert

    +

    c. OR, you can manually copy the config file to one of OpenVPN's +configuration directories:

    +
    C:\Program Files\OpenVPN\config (global configs)
    +C:\Program Files\OpenVPN\config-auto (autostarted global configs)
    +%USERPROFILE%\OpenVPN\config (per-user configs)
    +
    +
  2. +
+

Connect to a VPN server location

+

To launch OpenVPN connections, click on the OpenVPN GUI (tray applet). OpenVPN GUI is used to launch VPN connections on demand. It is a system-tray applet, so an icon for the GUI will appear in the lower-right corner of the screen, in the taskbar notification area. Right-click on the system tray icon; if you have multiple configurations, a menu should appear showing the names of your OpenVPN configuration profiles and giving you the option to connect. If you have only one configuration, you can just click on the "Connect" menu.

+

Connect Menu

+

Connection Successful

+

When you are connected to the OpenVPN server successfully, you will see a popup message as shown below. That's it! You are now connected to a VPN.

+

Connected Notification

+

Once you are connected to the OpenVPN server, you can run commands like the one shown below in your terminal to connect to the private instances: ssh ubuntu@192.168.0.40 -A -i cloud.key

+

SSH VPN Server

+

Disconnect VPN server

+

To disconnect, right-click on the system tray icon in your status bar and select Disconnect from the menu.

+

Disconnect VPN server

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/tunnelblick_for_macos/index.html b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/tunnelblick_for_macos/index.html new file mode 100644 index 00000000..085d4bd5 --- /dev/null +++ b/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/tunnelblick_for_macos/index.html @@ -0,0 +1,4702 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Tunnelblick

+

Tunnelblick is a free, open-source GUI (graphical user interface) for OpenVPN on macOS and OS X; more details can be found here. It gives you access to a VPN server: your computer is one end of the tunnel and the VPN server is the other end.

+

Find your client account credentials

+

You need to contact your project administrator to get your own OpenVPN configuration file (a file with a .ovpn extension). Download it and keep it on your local machine so that you can use this client configuration profile in the next steps.

+

Download and install Tunnelblick

+
    +
  1. +

    Download Tunnelblick, a free and + user-friendly app for managing OpenVPN connections on macOS.

    +

    Tunnelblick Download

    +
  2. +
  3. +

    Navigate to your Downloads folder and double-click the Tunnelblick + installation file (.dmg installer file) you have just downloaded.

    +

    dmg Installer File

    +
  4. +
  5. +

    In the window that opens, double-click on the Tunnelblick icon.

    +

    Tunnelblick Interface

    +
  6. +
  7. +

    A new dialogue box will pop up, asking you if you are sure you want to open + the app. Click Open.

    +

    Popup Open Confirmation

    +

    Access Popup

    +
  8. +
  9. +

    You will be asked to enter your device password. Enter it and click OK:

    +

    User Password prompt to Authorize

    +
  10. +
  11. +

    Select Allow or Don't Allow for your notification preference.

    +

    Notification Settings

    +
  12. +
  13. +

    Once the installation is complete, you will see a pop-up notification asking + you if you want to launch Tunnelblick now. (An administrator username and + password will be required to secure Tunnelblick). Click Launch.

    +

    Alternatively, you can click on the Tunnelblick icon in the status bar +and select VPN Details...:

    +

    VPN Details Menu

    +

    Configuration

    +
  14. +
+

Set up the VPN with Tunnelblick

+
    +
  1. +

    A new dialogue box will appear. Click I have configuration files.

    +

    Configuration File Options

    +
  2. +
  3. +

    Another notification will pop up, instructing you how to import configuration files. Click OK.

    +

    Add A Configuration

    +
  4. +
  5. +

    Drag and drop the previously downloaded .ovpn file from your Downloads + folder to the Configurations tab in Tunnelblick.

    +

    Load Client Config File

    +

    OR,

    +

    You can just drag and drop the provided OpenVPN configuration file (file +with .ovpn extension) directly to Tunnelblick icon in status bar at the +top-right corner of your screen.

    +

    Load config on Tunnelblick

    +
  6. +
  7. +

    A pop-up will appear, asking you if you want to install the configuration + profile for your current user only or for all users on your Mac. Select your + preferred option. If the VPN is intended for all accounts on your Mac, select + All Users. If the VPN will only be used by your current account, select + Only Me.

    +

    Configuration Installation Setting

    +
  8. +
  9. +

    You will be asked to enter your Mac password.

    +

    User Login for Authentication

    +

    Loaded Client Configuration

    +

    Then the screen reads "Tunnelblick successfully installed one configuration".

    +

    VPN Configuration Installed Successfully

    +
  10. +
+

You can see the configuration setting is loaded and installed successfully.

+

Connect to a VPN server location

+
    +
  1. +

    To connect to a VPN server location, click the Tunnelblick icon in status + bar at the top-right corner of your screen.

    +

    Tunnelblick icon in status bar

    +
  2. +
  3. +

    From the drop-down menu, select the server and click Connect [name of the .ovpn configuration file].

    +

    Connect VPN

    +

    Alternatively, you can select "VPN Details" from the menu and then click the "Connect" button:

    +

    Tunnelblick Configuration Interface

    +

    This will show the connection log on the dialog:

    +

    Connection Log

    +
  4. +
  5. +

    When you are successfully connected to the OpenVPN server, you will see a popup message as shown below. That's it! You are now connected to a VPN.

    +

    Tunnel Successful

    +
  6. +
  7. +

    Once you are connected to the OpenVPN server, you can run commands like the one shown below to connect to the private instances:

    +
    ssh ubuntu@192.168.0.40 -A -i cloud.key
    +
    +

    Private Instance SSH Accessible

    +
  8. +
+

Disconnect VPN server

+

To disconnect, click on the Tunnelblick icon in your status bar and select +Disconnect in the drop-down menu.

+

Disconnect using Tunnelblick icon

+

While closing, the connection log will be shown in a popup as shown below: Preview Connection Log

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/images/available_instances.png b/openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/images/available_instances.png new file mode 100644 index 00000000..f431daec Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/images/available_instances.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/images/client_connected.png b/openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/images/client_connected.png new file mode 100644 index 00000000..11730c4e Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/images/client_connected.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/images/security_groups.png b/openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/images/security_groups.png new file mode 100644 index 00000000..d130a0d7 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/images/security_groups.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/images/ssh_server.png b/openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/images/ssh_server.png new file mode 100644 index 00000000..dd84bca0 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/images/ssh_server.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/index.html b/openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/index.html new file mode 100644 index 00000000..55efaaca --- /dev/null +++ b/openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/index.html @@ -0,0 +1,4689 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

sshuttle

+

sshuttle is a lightweight SSH-encrypted VPN. It is a Python-based script that allows you to tunnel connections through SSH in a far more efficient way than traditional SSH proxying.

+

Installing sshuttle Server

+

You can spin up a new instance with "ubuntu-22.04-x86_64" or any available +Ubuntu OS image, named "sshuttle_server" on OpenStack, with +"default" and "ssh_only" Security Groups attached to it.

+

Available instances

+

Also, attach a Floating IP to this instance so you can ssh into it from outside.

+

Security Groups

+

Finally, you'll want to configure the settings for the remote instance in your SSH configuration file (typically found in ~/.ssh/config). The SSH configuration file might include an entry for your newly created sshuttle server like this:

+
Host sshuttle
+
+  HostName 140.247.152.244
+  User ubuntu
+  IdentityFile ~/.ssh/cloud.key
+
+
    +
  1. Then you can ssh into the sshuttle server by running: ssh sshuttle
  2. +
+

SSH sshuttle server

+
+

Note

+

Unlike other VPN servers, for sshuttle you don't need to install +anything on the server side. As long as you have an SSH server (with +python3 installed) you're good to go.

+
+

To connect from a new client, install sshuttle

+

Windows

+

Currently there is no built-in support for running sshuttle directly on Microsoft Windows. What you can do is create a Linux VM with Vagrant (or simply VirtualBox if you like) and then connect via that VM. For more details, read here

+

Mac OS X

+

Install using Homebrew:

+
brew install sshuttle
+
+

OR, via MacPorts

+
sudo port selfupdate
+sudo port install sshuttle
+
+

Linux

+

sshuttle is available through the package management system on most Linux distributions.

+

On Debian/Ubuntu:

+
sudo apt-get install sshuttle
+
+

On RedHat/Rocky/AlmaLinux:

+
sudo dnf install sshuttle
+
+

It is also possible to install into a virtualenv as a non-root user.

+
    +
  • From PyPI:
  • +
+
virtualenv -p python3 /tmp/sshuttle
+. /tmp/sshuttle/bin/activate
+pip install sshuttle
+
+
    +
  • Clone:
  • +
+
virtualenv -p python3 /tmp/sshuttle
+. /tmp/sshuttle/bin/activate
+git clone https://github.com/sshuttle/sshuttle.git
+cd sshuttle
+./setup.py install
+
+

How to Connect

+

Tunnel to all networks (0.0.0.0/0):

+
sshuttle -r ubuntu@140.247.152.244 0.0.0.0/0
+
+

OR, shorthand:

+
sudo sshuttle -r ubuntu@140.247.152.244 0/0
+
+

If you would also like your DNS queries to be proxied through the DNS server of the server you are connected to:

+
sshuttle --dns -r ubuntu@140.247.152.244 0/0
+
+
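If you only want to route traffic for your project's private network through the tunnel (rather than all traffic), you can pass that subnet instead of 0/0. A sketch, assuming your private network is 192.168.0.0/24 and reusing the sshuttle host alias from the SSH config above:

sshuttle -r sshuttle 192.168.0.0/24

With this, only connections to 192.168.0.x addresses (such as the private instances shown later) go through the SSH tunnel, and everything else uses your normal network.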

sshuttle Client connected

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/add_vpn_config_popup.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/add_vpn_config_popup.png new file mode 100644 index 00000000..fb355d34 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/add_vpn_config_popup.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/app.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/app.png new file mode 100644 index 00000000..e859e6b7 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/app.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/available_instances.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/available_instances.png new file mode 100644 index 00000000..864da978 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/available_instances.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/block_untunnelled_traffic_option.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/block_untunnelled_traffic_option.png new file mode 100644 index 00000000..19e514d9 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/block_untunnelled_traffic_option.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/browse_import_config_file.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/browse_import_config_file.png new file mode 100644 index 00000000..4296c25e Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/browse_import_config_file.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/client_config_template.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/client_config_template.png new file mode 100644 index 00000000..75f5a41d Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/client_config_template.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/deactivate.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/deactivate.png new file mode 100644 index 00000000..098b84a0 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/deactivate.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/deactivate_connection.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/deactivate_connection.png new file mode 100644 index 00000000..1b5ca6c0 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/deactivate_connection.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/edit_tunnel_config.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/edit_tunnel_config.png new file mode 100644 index 00000000..51611495 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/edit_tunnel_config.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/generate_client_nerc.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/generate_client_nerc.png new 
file mode 100644 index 00000000..40c39600 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/generate_client_nerc.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/import_config_file.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/import_config_file.png new file mode 100644 index 00000000..405d69f3 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/import_config_file.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/import_config_file_mac.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/import_config_file_mac.png new file mode 100644 index 00000000..0eb68485 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/import_config_file_mac.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/imported_config.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/imported_config.png new file mode 100644 index 00000000..f30ee078 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/imported_config.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/mac_import_config_file.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/mac_import_config_file.png new file mode 100644 index 00000000..21cc875a Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/mac_import_config_file.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/on_demand_option.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/on_demand_option.png new file mode 100644 index 00000000..17763937 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/on_demand_option.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/second_client_generate.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/second_client_generate.png new file mode 100644 index 00000000..d13c7757 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/second_client_generate.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/security_groups.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/security_groups.png new file mode 100644 index 00000000..9e1ef30f Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/security_groups.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/setup.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/setup.png new file mode 100644 index 00000000..3945aa5b Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/setup.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/setup_client_completed.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/setup_client_completed.png new file mode 100644 index 00000000..bc04bf52 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/setup_client_completed.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/ssh_server.png 
b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/ssh_server.png new file mode 100644 index 00000000..fc05d41d Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/ssh_server.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/ssh_vpn_server.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/ssh_vpn_server.png new file mode 100644 index 00000000..e94379e6 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/ssh_vpn_server.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/tunnel_activated.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/tunnel_activated.png new file mode 100644 index 00000000..ae00db2f Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/tunnel_activated.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/tunnel_activated_mac.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/tunnel_activated_mac.png new file mode 100644 index 00000000..5354f39f Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/tunnel_activated_mac.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/tunnel_public_info.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/tunnel_public_info.png new file mode 100644 index 00000000..8e7b0071 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/tunnel_public_info.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/wireguard_app_icon.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/wireguard_app_icon.png new file mode 100644 index 00000000..92f5d933 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/wireguard_app_icon.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/wireguard_security_rule.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/wireguard_security_rule.png new file mode 100644 index 00000000..b9738673 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/wireguard_security_rule.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/wireguard_taskbar_icon.png b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/wireguard_taskbar_icon.png new file mode 100644 index 00000000..692b63d8 Binary files /dev/null and b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/images/wireguard_taskbar_icon.png differ diff --git a/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/index.html b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/index.html new file mode 100644 index 00000000..7424a758 --- /dev/null +++ b/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/index.html @@ -0,0 +1,4994 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

WireGuard

+

WireGuard is an extremely simple yet fast and +modern VPN that utilizes state-of-the-art cryptography.

+

Here's what it will look like:

+

WireGuard setup

+

Installing WireGuard Server

+

You can spin up a new instance with "ubuntu-22.04-x86_64" or any available +Ubuntu OS image, named "wireguard_server" on OpenStack, with +"default" and "ssh_only" Security Groups attached to it.

+

Available instances

+

Also, attach a Floating IP to this instance so you can ssh into it from outside.

+

Create a new Security Group i.e. "wireguard" that is listening on +UDP port 51820 as shown below:

+

WireGuard Security Rule

+

The Security Groups attached to the WireGuard server include "default", "ssh_only" and "wireguard". It should look similar to the image shown below:

+

Security Groups

+

Finally, you'll want to configure the settings for the remote instance in your SSH configuration file (typically found in ~/.ssh/config). The SSH configuration file might include an entry for your newly created WireGuard server like this:

+
Host wireguard
+  HostName 140.247.152.188
+  User ubuntu
+  IdentityFile ~/.ssh/cloud.key
+
+
    +
  1. +

    Then you can ssh into the WireGuard server by running: ssh wireguard

    +

    SSH sshuttle server

    +
  2. +
  3. +

    Also note that WireGuard must be installed and run by a user who has + administrative/root privileges. So, we need to run the command: sudo su

    +
  4. +
  5. +

    We are using this repo to + install WireGuard server on this ubuntu server.

    +

    For that, run the script and follow the assistant:

    +
    wget https://git.io/wireguard -O wireguard-install.sh && bash wireguard-install.sh
    +
    +

    Generating first client

    +

    You can press Enter to accept all default values. While entering a name for the first client you can give "nerc" as the client name; this will generate a new configuration file (.conf file) named "nerc.conf". The config file is named after your client's name, i.e. "<client name>.conf"

    +

    Setup Client completed

    +

    NOTE: For each peer, the client configuration file complies with the following template:

    +

    Client Config Template

    +
  6. +
  7. +

    Copy the generated config file from "/root/nerc.conf" to + "/home/ubuntu/nerc.conf" by running: cp /root/nerc.conf .

    +
  8. +
  9. +

    Update the ownership of the config file to ubuntu user and ubuntu group by + running the following command: chown ubuntu:ubuntu nerc.conf

    +
  10. +
  11. +

    You can exit from the root and ssh session altogether and then copy the configuration file to your local machine by running the following command in your local machine's terminal: scp wireguard:nerc.conf .

    +
  12. +
+

To add a new client user

+

Once it ends, you can run it again to add more users, remove some of them or +even completely uninstall WireGuard.

+

For this, run the script and follow the assistant:

+
wget https://git.io/wireguard -O wireguard-install.sh && bash wireguard-install.sh
+
+

Second Client Generate

+

Here, you are giving the client name as "mac_client", which will generate a new configuration file at "/root/mac_client.conf". You can repeat steps 4 to 6 above to copy this new client's configuration file and share it with the new client.

+

Authentication Mechanism

+

The VPN server should not allow just anyone to connect. This is where the public and private keys come into play.

+
    +
  • +

    Each client's public key needs to be added to the SERVER'S configuration file

    +
  • +
  • +

    The server's public key needs to be added to the CLIENT'S configuration file (see the sketch after this list)

    +
  • +
+
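As a rough illustration of where these keys end up (the addresses and key placeholders below are made up; the files generated by the install script will differ):

# Server: /etc/wireguard/wg0.conf
[Interface]
PrivateKey = <server private key>
Address = 10.7.0.1/24
ListenPort = 51820

[Peer]
# client "nerc"
PublicKey = <client public key>
AllowedIPs = 10.7.0.2/32

# Client: nerc.conf
[Interface]
PrivateKey = <client private key>
Address = 10.7.0.2/24

[Peer]
PublicKey = <server public key>
Endpoint = 140.247.152.188:51820
AllowedIPs = 0.0.0.0/0

The install script used above fills all of this in for you; you only need to touch these files by hand if you add peers manually.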

Useful commands

+

To view the server config: wg show (or simply wg)

+

To activate a config: wg-quick up /path/to/file_name.conf

+

To deactivate a config: wg-quick down /path/to/file_name.conf

+

Read more:

+

https://git.zx2c4.com/wireguard-tools/about/src/man/wg.8

+

https://git.zx2c4.com/wireguard-tools/about/src/man/wg-quick.8

+
+
+

Important Note

+

You need to contact your project administrator to get your own WireGuard configuration file (a file with the .conf extension). Download it and keep it on your local machine; in the next steps we will use this client configuration profile.

+
+

A WireGuard client or compatible software is needed to connect to the WireGuard VPN server. Please install one of these clients depending on your device. The client program must be configured with a client profile to connect to the WireGuard VPN server.

+

Windows

+

The WireGuard client can be downloaded here. The WireGuard executable should be installed on client machines. After installation, you should see the WireGuard icon in the taskbar notification area in the lower-right corner of the screen.

+

WireGuard taskbar icon

+

Set up the VPN with WireGuard GUI

+

Next, we configure the VPN tunnel. This includes setting up the endpoints and +exchanging the public keys.

+

Open the WireGuard GUI and either click on Add Tunnel -> Import tunnel(s) +from file… OR,

+

click on "Import tunnel(s) from file" button located at the center.

+

Import Config File

+

The software automatically loads the client configuration. Also, it creates a +public key for this new tunnel and displays it on the screen.

+

Imported Config

+

Either, Right Click on your tunnel name and select +"Edit selected tunnel…" menu OR, click on +"Edit" button at the lower left.

+

Edit selected Tunnel Config

+

Checking Block untunneled traffic (kill-switch) will make sure that all +your traffic is being routed through this new VPN server.

+

Block Untunnelled Traffic Option

+

Test your connection

+

On your Windows machine, press the "Activate" button. You should see a successful connection being made:

+

Tunnel Activated

+

After a few seconds, the status should change to Active.

+

If the connection is routed through the VPN, it should show the IP address of +the WireGuard server as the public address.

+

If that's not the case, to troubleshoot please check the "Log" +tab and verify and validate the client and server configuration.

+

Clicking " Deactivate" button closes the VPN connection.

+

Deactivate Connection

+

Mac OS X

+

I. Using HomeBrew

+

This allows more than one WireGuard tunnel to be active at a time, unlike the WireGuard GUI app.

+
    +
  1. +

    Install WireGuard CLI on macOS through brew: brew install wireguard-tools

    +
  2. +
  3. +

    Copy the ".conf" file to + "/usr/local/etc/wireguard/" (or "/etc/wireguard/"). + You'll need to create the " wireguard" directory first. For your + example, you will have your config file located at: " /usr/local/etc + /wireguard/mac_client.conf" or, "/etc/wireguard/mac_client.conf"

    +
  4. +
  5. +

    To activate the VPN, run "wg-quick up [name of the conf file without the .conf extension]". For example, in this case, run wg-quick up mac_client. If the peer system is already configured and its interface is up, the VPN connection should be established automatically, and you should be able to start routing traffic through the peer.

    +
  6. +
+

Use wg-quick down mac_client to take the VPN connection down.

+

II. Using WireGuard GUI App

+
    +
  1. +

    Download WireGuard Client from the macOS App Store

    +

    You can find the official WireGuard Client app on the App Store here.

    +

    WireGuard Client App

    +
  2. +
  3. +

    Set up the VPN with WireGuard

    +

    Next, we configure the VPN tunnel. This includes setting up the endpoints +and exchanging the public keys.

    +

    Open the WireGuard GUI by directly clicking WireGuard icon in status bar at +the top-right corner of your screen.

    +

    WireGuard app icon

    +

    And then click on "Import tunnel(s) from file" menu to load your +client config file.

    +

    Import Config File in Mac

    +

    OR,

    +

    Find and click the WireGuard GUI from your Launchpad and then either click on Add Tunnel -> Import tunnel(s) from file… or just click on the "Import tunnel(s) from file" button located at the center.

    +

    Import Config File in Mac

    +

    Browse to the configuration file:

    +

    Browse and Locate Import Config File

    +

    The software automatically loads the client configuration. Also, it creates +a public key for this new tunnel and displays it on the screen.

    +

    Add VPN Config Popup

    +

    Tunnel Public Info

    +

    If you would like your computer to automatically connect to the WireGuard VPN server as soon as either (or both) the Ethernet or Wi-Fi network adapter becomes active, check the relevant 'On-Demand' checkboxes for "Ethernet" and "Wi-Fi".

    +

    Checking Exclude private IPs will generate a list of networks which excludes the server IP address and adds them to the AllowedIPs list. This setting allows you to pass all your traffic through your WireGuard VPN EXCLUDING private address ranges like 10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16.

    +

    On-Demand Option for Ethernet and WiFi

    +
  4. +
  5. +

    Test your connection

    +

    On your Mac, press the "Activate" button. You should see a successful connection being made:

    +

    Tunnel Activated in Mac.png

    +

    After a few seconds, the status should change to Active.

    +

    Clicking "Deactivate" button from the GUI's interface or +directly clicking "Deactivate" menu from the WireGuard icon in +status bar at the top-right corner of your screen closes the VPN connection.

    +

    Deactivate Connection

    +
  6. +
+

Linux

+

WireGuard is available through the package management system on most Linux distributions.

+

On Debian/Ubuntu:

+
sudo apt update
+sudo apt-get install wireguard resolvconf -y
+
+

On RedHat/Rocky/AlmaLinux:

+
sudo dnf install wireguard
+
+

Then, to run WireGuard using the client profile: +Move the VPN client profile (configuration) file to /etc/wireguard/:

+
sudo mv nerc.conf /etc/wireguard/client.conf
+
+

Start the WireGuard daemon (i.e., this will start the WireGuard connection and it will run automatically on boot):

+
sudo /etc/init.d/wireguard start
+
+

OR,

+
sudo systemctl enable --now wg-quick@client
+sudo systemctl start wg-quick@client
+
+

OR,

+
wg-quick up /etc/wireguard/client.conf
+
+

Checking the status:

+
systemctl status wg-quick@client
+
+

Alternatively, if you want to run WireGuard manually each time, then run:

+
sudo wireguard --config /etc/wireguard/client.conf
+
+

OR,

+
sudo wireguard --config nerc.conf
+
+

To test the connection

+

Once you are connected to the WireGuard server, you can run commands like the one shown below in your terminal to connect to the private instances: ssh ubuntu@192.168.0.40 -A -i cloud.key

+

SSH VPN Server

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/data-transfer/data-transfer-from-to-vm/index.html b/openstack/data-transfer/data-transfer-from-to-vm/index.html new file mode 100644 index 00000000..29177cc8 --- /dev/null +++ b/openstack/data-transfer/data-transfer-from-to-vm/index.html @@ -0,0 +1,5598 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Data Transfer To/From NERC VM

+

Transfer using Volume

+

You may wish to transfer a volume, including all of its data, to a different project, which can be one of your own (available in the project dropdown list) or that of an external collaborator within NERC. For this you can follow this guide.

+
+

Very Important Note

+

If you transfer the volume, it will be removed from the source project and will only be available in the destination project.

+
+

Using Globus

+

Globus is a web-based service that is the preferred method +for transferring substantial data between NERC VM and other locations. It effectively +tackles the typical obstacles researchers encounter when moving, sharing, and +storing vast quantities of data. By utilizing Globus, you can delegate data transfer +tasks to a managed service that oversees the entire process. This service monitors +performance and errors, retries failed transfers, automatically resolves issues +whenever feasible, and provides status updates to keep you informed. This allows +you to concentrate on your research while relying on Globus to handle data movement +efficiently. For information on the user-friendly web interface of Globus and its +flexible REST/API for creating scripted tasks and operations, please visit +Globus.org.

+
+

Important Information

+

For large data sets and/or for access by external users, consider using Globus. +An institutional endpoint/collection is not required to use Globus - you can +set up a personal endpoint on your NERC VM and also on your local machine if +you need to transfer large amounts of data.

+
+

Setting up a Personal Globus Endpoint on NERC VM

+

You can do this using Globus Connect Personal to configure an endpoint on your NERC VM. In general, it is always fastest to set up a Personal endpoint on your NERC VM, and then use that endpoint for transfers to/from a local machine or any other shared or private Globus endpoints.

+

You can find instructions for downloading and installing the Globus Connect Personal +on the Globus web site.

+
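As a rough sketch of a typical headless setup on a Linux-based NERC VM (the download URL, version, and setup flow may change, so treat this as illustrative and follow the Globus instructions linked above):

wget https://downloads.globus.org/globus-connect-personal/linux/stable/globusconnectpersonal-latest.tgz
tar xzf globusconnectpersonal-latest.tgz
cd globusconnectpersonal-*
./globusconnectpersonal -setup    # prints a login URL and asks for the resulting code
./globusconnectpersonal -start &  # run the endpoint in the background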
+

Helpful Tip

+

You may get a "Permission Denied" error for certain paths with Globus Connect +Personal. If you do, you may need to add this path to your list of allowed +paths for Globus Connect Personal. You can do this by editing the +~/.globusonline/lta/config-paths file and adding the new path as a line in +the end of the list. The path must be followed by sharing (0/1) and +R/W (0/1) flags.

+

For example, to enable read-write access to the /data/tables directory, add +the following line i.e. /data/tables,0,1.

+
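For example, from a shell on the VM you could append that line with:

echo "/data/tables,0,1" >> ~/.globusonline/lta/config-paths

You may need to restart Globus Connect Personal for the change to take effect.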
+

Usage of Globus

+

Once a Personal Endpoint is set up on a NERC VM, you will be able to find that named collection in the Globus file explorer, and it can then be chosen as the source or destination for data transfers to/from another Guest Collection (Globus Shared Endpoint). Log in to the Globus web interface, select your organization (which will allow you to log in to Globus), and land on the File Manager page.

+

If your account belongs to a Globus Subscription, you will be able to transfer data between two personal endpoints, i.e. you can set up your local machine as another personal endpoint.

+

Globus Transfer

+

Using SCP

+
+

Important Information

+

SCP is suggested for smaller files (<~10GB), otherwise use Globus. +When you want to transfer many small files in a directory, we recommend Globus.

+
+

We generally recommend using SCP (Secure Copy) to copy data to and from your VM. SCP is used to securely transfer files between two hosts using the Secure Shell (ssh) protocol. Its usage is simple, but the order in which file locations are specified is crucial. SCP always expects the 'from' location first, then the 'to' destination. Depending on which one is the remote system, you will prefix it with your username and the Floating IP of your NERC VM.

+

scp [username@Floating_IP:][location of file] [destination of file]

+

or,

+

scp [location of file] [username@Floating_IP:][destination of file]

+

Usage

+

Below are some examples of the two most common scenarios of SCP to copy to and from +various sources.

+
+

Helpful Tip

+

We use '~' in the examples. The tilde '~' is a Unix short-hand that means +"my home directory". So if user almalinux uses ~/ this is the same as typing +out the full path to almalinux user's home directory (easier to remember than +/home/almalinux/). You can, of course, specify other paths (ex. – +/user/almalinux/output/files.zip) Also, we use . in the examples to specify +the current directory path from where the command is issued. This can be +replaced with the actual path.

+
+

i. Copying Files From the NERC VM to Another Computer:

+

From a terminal/shell from your local machine, you'll issue your SCP command by +specifying the SSH Private Key to connect with the VM that has included corresponding +SSH Public Key. The syntax is:

+
scp -i <Your SSH Private Key including Path> <Default User name based on OS>@<Your Floating IP of VM>:~/<File In VM> .
+
+

This copies the file <File In VM> from your VM's default user's home directory (~ is a Unix shortcut for my home directory) on your VM to your current directory (. is a Unix shortcut for the current directory) on the computer from which the command is issued, or you can specify an actual path instead of ..

+

For e.g.

+
scp -i ~/.ssh/your_pem_key_file.pem almalinux@199.94.60.219:~/myfile.zip /my_local_directory/
+
+

ii. Copying Files From Another Computer to the NERC VM:

+

From a terminal/shell on your computer (or another server or cluster) where you +have access to the SSH Private Key, you'll issue your SCP command. The syntax is:

+
scp -i <Your SSH Private Key including Path> ./<Your Local File> <Default User name based on OS>@<Your Floating IP of VM>:~/`
+
+

This copies the file <Your Local File> from the current directory on the computer +you issued the command from, to your home directory on your NERC VM. (recall that +. is a Unix shortcut for the current directory path and ~ is a Unix shortcut +for my home directory)

+

For e.g.

+
scp -i ~/.ssh/your_pem_key_file.pem ./myfile.zip almalinux@199.94.60.219:~/myfile.zip
+
+
+

Important Note

+

While it’s probably best to compress all the files you intend to transfer into +one file, this is not always an option. To copy the contents of an entire directory, +you can use the -r (for recursive) flag.

+

For e.g.

+
scp -i ~/.ssh/your_pem_key_file.pem -r almalinux@<Floating_IP>:~/mydata/ ./destination_directory/
+
+

This copies all the files from ~/mydata/ on the cluster to the current directory (i.e. .) on the computer you issued the command from. Here you can replace ./ with the actual full path on your local machine and ~/ with the actual full path on your NERC VM.

+
+

Using tar+ssh

+

When you want to transfer many small files in a directory, we recommend +Globus. If you don't wish to use Globus, you can consider using +ssh piped with tar.

+

i. Send a directory to NERC VM:

+
tar cz /local/path/dirname | ssh -i <Your SSH Private Key including Path> <Default User name based on OS>@<Your Floating IP of VM> tar zxv -C /remote/path
+
+

ii. Get a directory from NERC VM:

+
ssh -i <Your SSH Private Key including Path> <Default User name based on OS>@<Your Floating IP of VM> tar cz /remote/path/dirname | tar zxv -C /local/path
+
+

Using rsync

+

Rsync is a fast, versatile, remote (and local) +file-copying tool. It is famous for its delta-transfer algorithm, which reduces +the amount of data sent over the network by sending only the differences between +the source files and the existing files in the destination. This can often lead +to efficiencies in repeat-transfer scenarios, as rsync only copies files that are +different between the source and target locations (and can even transfer partial +files when only part of a file has changed). This can be very useful in reducing +the amount of copies you may perform when synchronizing two datasets.

+

The basic syntax is: rsync SOURCE DESTINATION where SOURCE and DESTINATION are filesystem paths. They can be local, either absolute or relative to the current working directory, or they can be remote by prefixing something like USERNAME@HOSTNAME: to the front of them.

+

i. Synchronizing from a local machine to NERC VM:

+
rsync -avxz ./source_directory/ -e "ssh -i ~/.ssh/your_pem_key_file.pem" <user_name>@<Floating_IP>:~/destination_directory/
+
+

ii. Synchronizing from NERC VM to a local machine:

+
rsync -avz -e "ssh -i ~/.ssh/your_pem_key_file.pem" -r <user_name>@<Floating_IP>:~/source_directory/ ./destination_directory/
+
+

iii. Update a previously made copy of "foo" on the NERC VM after you’ve made changes +to the local copy:

+
rsync -avz --delete foo/ -e "ssh -i ~/.ssh/your_pem_key_file.pem" <user_name>@<Floating_IP>:~/foo/
+
+
+

Be careful with this option!

+

The --delete option has no effect when making a new copy, and therefore can +be used in the previous example too (making the commands identical), but since +it recursively deletes files, it’s best to use it sparingly. If you want to +maintain a mirror (i.e. the DESTINATION is to be an exact copy of the +SOURCE) then you will want to add the --delete option. This deletes +files/directories in the DESTINATION that are no longer in the SOURCE.

+
+

iv. Update a previously made copy of "foo" on the NERC VM after you or someone +else has already updated it from a different source:

+
rsync -aAvz --update foo/ -e "ssh -i ~/.ssh/your_pem_key_file.pem" <user_name>@<Floating_IP>:~/foo/
+
+
+

Information

+

The --update option has no effect when making a new copy and can also be +specified in that case. If you're updating a master copy (i.e. the +DESTINATION may have files that are newer than the version(s) in SOURCE) +then you will also want to add the --update option. This will leave those +files alone and not revert them to the older copy in SOURCE.

+
+

Progress, Verbosity, Statistics

+

-v +Verbose mode — list each file transferred. +Adding more vs makes it more verbose.

+

--progress +Show a progress meter for each individual file transfer that is part of the +entire operation. If you have many small files then this option can significantly +slow down the transfer.

+

--stats +Print a short paragraph of statistics at the end of the session (e.g. average transfer +rate, total number of files transferred, etc).

+

Other Useful Options

+

--dry-run +Perform a dry-run of the session instead of actually modifying the DESTINATION. +Mostly useful when adding multiple -v options, especially for verifying --delete +is doing what you want.

+

--exclude PATTERN +Skip files/directories in the SOURCE that match a given pattern (supports regular +expressions)

+
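Combining the options above, a cautious way to preview the mirror example before running it for real is (paths and key file as in the earlier examples):

rsync -avz --dry-run --delete foo/ -e "ssh -i ~/.ssh/your_pem_key_file.pem" <user_name>@<Floating_IP>:~/foo/

If the listed changes look correct, re-run the same command without --dry-run.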

Using Rclone

+

rclone is a convenient and performant command-line tool for transferring files +and synchronizing directories directly between your local file systems and a +given NERC VM.

+

Prerequisites:

+

To run the rclone commands, you need to have:

+ +

Configuring Rclone

+

First you'll need to configure rclone. The filesystem protocols, especially, +can have complicated authentication parameters so it's best to place these details +in a config file.

+

If you run rclone config file you will see where the default location is for +your current user.

+
+

Note

+

For Windows users, you may need to specify the full path to the Rclone +executable file if it's not included in your system's %PATH% variable.

+
+

Edit the config file's content at the path location reported by the rclone config file command and add the following entry with the name [nerc]:

+
[nerc]
+type = sftp
+host = 199.94.60.219
+user = almalinux
+port =
+pass =
+key_file = C:\Users\YourName\.ssh\cloud.key
+shell_type = unix
+
+

More about the config for SFTP can be found here.

+

OR, You can locally copy this content to a new config file and then use this +flag to override the config location, e.g. rclone --config=FILE

+
+

Interactive Configuration

+

Run rclone config to setup. See Rclone config docs +for more details.

+
+

How to use Rclone

+

rclone supports many subcommands (see +the complete list of Rclone subcommands). +A few commonly used subcommands (assuming you configured the NERC VM filesystem +as nerc):

+

Listing Files and Folders

+

Once your NERC VM filesystem has been configured in Rclone, you can then use the +Rclone interface to List all the directories with the "lsd" command:

+
rclone lsd "nerc:"
+
+

or,

+
rclone lsd "nerc:" --config=rclone.conf
+
+

For e.g.

+
rclone lsd "nerc:" --config=rclone.conf
+        -1 2023-07-06 12:18:24        -1 .ssh
+        -1 2023-07-06 19:27:19        -1 destination_directory
+
+

To list the files and folders available within the directory (i.e. +"destination_directory") we can use the "ls" command:

+
rclone ls "nerc:destination_directory/"
+  653 README.md
+    0 image.png
+   12 test-file
+
+

Uploading and Downloading Files and Folders

+

rclone supports a variety of options to allow you to copy, sync, and move files from one destination to another.

+

A simple example of this can be seen below where we copy/upload the file +upload.me to the <your-directory> directory:

+
rclone copy "./upload.me" "nerc:<your-directory>/"
+
+

Another example, to copy/download the file upload.me from the remote +directory, <your-directory>, to your local machine:

+
rclone -P copy "nerc:<your-directory>/upload.me" "./"
+
+

Also, to sync files into the <your-directory> directory, it's recommended to try with --dry-run first. This will give you a preview of what would be synced without actually performing any transfers.

+
rclone --dry-run sync /path/to/files nerc:<your-directory>
+
+

Then sync for real

+
rclone sync --interactive /path/to/files nerc:<your-directory>
+
+

Mounting VM filesystem on local filesystem

+

Linux:

+

First, you need to create a directory on which you will mount your filesystem:

+

mkdir ~/mnt-rclone

+

Then you can simply mount your filesystem with:

+

rclone -vv --vfs-cache-mode writes mount nerc: ~/mnt-rclone

+

Windows:

+

First you have to download WinFsp:

+

WinFsp is an open source Windows File System Proxy which provides a FUSE +emulation layer.

+

Then you can simply mount your VM's filesystem with (no need to create the directory +in advance):

+

rclone -vv --vfs-cache-mode writes mount nerc: C:/mnt-rclone

+

The vfs-cache-mode flag enables file caching. You can use either the writes +or full option. For further explanation you can see the official documentation.

+

Now that your VM's filesystem is mounted locally, you can list, create, and delete +files in it.

+

Unmount NERC VM filesystem

+

To unmount, simply press CTRL-C and the mount will be interrupted.

+

Using Graphical User Interface (GUI) Tools

+

i. WinSCP

+

WinSCP is a popular and free open-source SFTP +client, SCP client, and FTP client for Windows. Its main function is file transfer +between a local and a remote computer, with some basic file management functionality +using FTP, FTPS, SCP, SFTP, WebDAV, or S3 file transfer protocols.

+

Prerequisites:

+
    +
  • +

    WinSCP installed, see Download and Install the latest version of the WinSCP + for more information.

    +
  • +
  • +

    Go to WinSCP menu and open "View > Preferences".

    +
  • +
  • +

    When the "Preferences" dialog window appears, select "Transfer" in the options + on the left pane.

    +
  • +
  • +

    Click on the "Edit" button.

    +
  • +
  • +

    Then, in the popup dialog box, review the "Common options" group and uncheck + the "Preserve timestamp" option as shown below:

    +
  • +
+

Disable Preserve TimeStamp

+

Configuring WinSCP

+
    +
  • Click on the "New Tab" button as shown below:
  • +
+

Login

+
    +
  • Select either "SFTP" or "SCP" from the "File protocol" dropdown options + as shown below:
  • +
+

Choose SFTP or SCP File Protocol

+
    +
  • Provide the following required information:
  • +
+

"File protocol": Choose either ""SFTP" or "SCP""

+

"Host name": "<Your Floating IP of VM>"

+

"Port number": "22"

+

"User name": "<Default User name based on OS>"

+
+

Default User name based on OS

+
    +
  • +

    all Ubuntu images: ubuntu

    +
  • +
  • +

    all AlmaLinux images: almalinux

    +
  • +
  • +

    all Rocky Linux images: rocky

    +
  • +
  • +

    all Fedora images: fedora

    +
  • +
  • +

    all Debian images: debian

    +
  • +
  • +

    all RHEL images: cloud-user

    +
  • +
+

If you still have VMs running with deleted CentOS images, you need to + use the following default username for your CentOS images: centos.

+
+

"Password": "<Leave blank as you using SSH key>"

+
    +
  • Change Authentication Options
  • +
+

Before saving, click the "Advanced" button. +In the "Advanced Site Settings", under "SSH >> Authentication" settings, check +"Allow agent forwarding" and select the private key file with .ppk extension +from the file picker.

+

Advanced Site Settings for SSH Authentication

+
+

Helpful Tip

+

You can save your above configured site with some preferred name by + clicking the "Save" button and then giving a proper name to your site. + This prevents needing to manually enter all of your configuration again the + next time you need to use WinSCP.

+

Save Site WinSCP

+
+

Using WinSCP

+

You can follow the above steps to manually add a new site the next time you open +WinSCP, or you can connect to your previously saved site. Saved sites will be +listed in the popup dialog and can be selected by clicking on the site name.

+

Then click the "Login" button to connect to your NERC project's VM as shown below:

+

Login

+

Successful connection

+

You should now be connected to the VM's remote directories/files. You can drag +and drop your files to/from file windows to begin transfer. When you're finished, +click the "X" icon in the top right to disconnect.

+

ii. Cyberduck

+

Cyberduck is a libre server and cloud +storage browser for Mac and Windows. Its user-friendly interface enables seamless +connections to servers, enterprise file sharing, and various cloud storage platforms.

+

Prerequisites:

+ +

Configuring Cyberduck

+
    +
  • Click on the "Open Connection" button as shown below:
  • +
+

Open Connection

+
    +
  • Select either "SFTP" or "FTP" from the dropdown options as shown below:
  • +
+

Choose Amazon S3

+
    +
  • Provide the following required information:
  • +
+

"Server": "<Your Floating IP of VM>"

+

"Port": "22"

+

"User name": "<Default User name based on OS>"

+
+

Default User name based on OS

+
    +
  • +

    all Ubuntu images: ubuntu

    +
  • +
  • +

    all AlmaLinux images: almalinux

    +
  • +
  • +

    all Rocky Linux images: rocky

    +
  • +
  • +

    all Fedora images: fedora

    +
  • +
  • +

    all Debian images: debian

    +
  • +
  • +

    all RHEL images: cloud-user

    +
  • +
+
+

"Password": "<Leave blank as you using SSH key>"

+

"SSH Private Key": "Choose the appropriate SSH Private Key from your local +machine that has the corresponding public key attached to your VM"

+

Cyberduck SFTP or FTP Configuration

+

Using Cyberduck

+

Then click the "Connect" button to connect to your NERC VM as shown below:

+

Successful connection

+

You should now be connected to the VM's remote directories/files. You can drag +and drop your files to/from file windows to begin transfer. When you're +finished, click the "X" icon in the top right to disconnect.

+

iii. Filezilla

+

Filezilla is a free and open source SFTP client which is built on modern standards. It is available cross-platform (Mac, Windows and Linux) and is actively maintained. You can transfer files to/from the VM from your computer or any resources connected to your computer (shared drives, Dropbox, etc.)

+

Prerequisites:

+ +

Configuring Filezilla

+
    +
  • Click on "Site Manager" icon as shown below:
  • +
+

Site Manager

+
    +
  • Click on "New Site" as shown below:
  • +
+

Click New Site

+
    +
  • Select either "SFTP" or "FTP" from the dropdown options as shown below:
  • +
+

Select Protocol

+
    +
  • Provide the following required information:
  • +
+

"Server": "<Your Floating IP of VM>"

+

"Port": "22"

+

"Logon Type": "Key file" from the dropdown option

+

"User": "<Default User name based on OS>"

+
+

Default User name based on OS

+
    +
  • +

    all Ubuntu images: ubuntu

    +
  • +
  • +

    all AlmaLinux images: almalinux

    +
  • +
  • +

    all Rocky Linux images: rocky

    +
  • +
  • +

    all Fedora images: fedora

    +
  • +
  • +

    all Debian images: debian

    +
  • +
  • +

    all RHEL images: cloud-user

    +
  • +
+

If you still have VMs running with deleted CentOS images, you need to + use the following default username for your CentOS images: centos.

+
+

"Key file": "Browse and choose the appropriate SSH Private Key from you +local machine that has corresponding Public Key attached to your VM"

+

Filezilla SFTP or FTP Configuration

+

Using Filezilla

+

Then click "Connect" button to connect to your NERC VM as shown below:

+

Successful connection

+

You should now be connected to the VM and see your local files in the left-hand +pane and the remote files in the right-hand pane. You can drag and drop between +them or drag and drop to/from file windows on your computer. When you're +finished, click the "X" icon in the top right to disconnect.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/data-transfer/images/choose_SFTP_or_SCP_protocol.png b/openstack/data-transfer/images/choose_SFTP_or_SCP_protocol.png new file mode 100644 index 00000000..86e2a9d7 Binary files /dev/null and b/openstack/data-transfer/images/choose_SFTP_or_SCP_protocol.png differ diff --git a/openstack/data-transfer/images/cyberduck-open-connection-new.png b/openstack/data-transfer/images/cyberduck-open-connection-new.png new file mode 100644 index 00000000..8224b273 Binary files /dev/null and b/openstack/data-transfer/images/cyberduck-open-connection-new.png differ diff --git a/openstack/data-transfer/images/cyberduck-open-connection-sftp.png b/openstack/data-transfer/images/cyberduck-open-connection-sftp.png new file mode 100644 index 00000000..89bc6237 Binary files /dev/null and b/openstack/data-transfer/images/cyberduck-open-connection-sftp.png differ diff --git a/openstack/data-transfer/images/cyberduck-select-sftp-or-ftp.png b/openstack/data-transfer/images/cyberduck-select-sftp-or-ftp.png new file mode 100644 index 00000000..7be81f6a Binary files /dev/null and b/openstack/data-transfer/images/cyberduck-select-sftp-or-ftp.png differ diff --git a/openstack/data-transfer/images/cyberduck-sftp-successful-connection.png b/openstack/data-transfer/images/cyberduck-sftp-successful-connection.png new file mode 100644 index 00000000..296fc905 Binary files /dev/null and b/openstack/data-transfer/images/cyberduck-sftp-successful-connection.png differ diff --git a/openstack/data-transfer/images/filezilla-click-new-site.png b/openstack/data-transfer/images/filezilla-click-new-site.png new file mode 100644 index 00000000..69aa41a2 Binary files /dev/null and b/openstack/data-transfer/images/filezilla-click-new-site.png differ diff --git a/openstack/data-transfer/images/filezilla-connect-config.png b/openstack/data-transfer/images/filezilla-connect-config.png new file mode 100644 index 00000000..dc704c07 Binary files /dev/null and b/openstack/data-transfer/images/filezilla-connect-config.png differ diff --git a/openstack/data-transfer/images/filezilla-new-site.png b/openstack/data-transfer/images/filezilla-new-site.png new file mode 100644 index 00000000..35e23687 Binary files /dev/null and b/openstack/data-transfer/images/filezilla-new-site.png differ diff --git a/openstack/data-transfer/images/filezilla-sftp-or-ftp.png b/openstack/data-transfer/images/filezilla-sftp-or-ftp.png new file mode 100644 index 00000000..979046d1 Binary files /dev/null and b/openstack/data-transfer/images/filezilla-sftp-or-ftp.png differ diff --git a/openstack/data-transfer/images/filezilla-sftp-successful-connection.png b/openstack/data-transfer/images/filezilla-sftp-successful-connection.png new file mode 100644 index 00000000..52629f44 Binary files /dev/null and b/openstack/data-transfer/images/filezilla-sftp-successful-connection.png differ diff --git a/openstack/data-transfer/images/globus-transfer.png b/openstack/data-transfer/images/globus-transfer.png new file mode 100644 index 00000000..b6668d9e Binary files /dev/null and b/openstack/data-transfer/images/globus-transfer.png differ diff --git a/openstack/data-transfer/images/winscp-new-tab.png b/openstack/data-transfer/images/winscp-new-tab.png new file mode 100644 index 00000000..1fdf69c2 Binary files /dev/null and b/openstack/data-transfer/images/winscp-new-tab.png differ diff --git a/openstack/data-transfer/images/winscp-preferences-perserve-timestamp-disable.png 
b/openstack/data-transfer/images/winscp-preferences-perserve-timestamp-disable.png new file mode 100644 index 00000000..62d7337a Binary files /dev/null and b/openstack/data-transfer/images/winscp-preferences-perserve-timestamp-disable.png differ diff --git a/openstack/data-transfer/images/winscp-save-site.png b/openstack/data-transfer/images/winscp-save-site.png new file mode 100644 index 00000000..1ad86e79 Binary files /dev/null and b/openstack/data-transfer/images/winscp-save-site.png differ diff --git a/openstack/data-transfer/images/winscp-site-login.png b/openstack/data-transfer/images/winscp-site-login.png new file mode 100644 index 00000000..1c6c36ff Binary files /dev/null and b/openstack/data-transfer/images/winscp-site-login.png differ diff --git a/openstack/data-transfer/images/winscp-site-successfully-connected.png b/openstack/data-transfer/images/winscp-site-successfully-connected.png new file mode 100644 index 00000000..05538f51 Binary files /dev/null and b/openstack/data-transfer/images/winscp-site-successfully-connected.png differ diff --git a/openstack/data-transfer/images/winscp-ssh-auth.png b/openstack/data-transfer/images/winscp-ssh-auth.png new file mode 100644 index 00000000..e4aac8db Binary files /dev/null and b/openstack/data-transfer/images/winscp-ssh-auth.png differ diff --git a/openstack/decommission/decommission-openstack-resources/index.html b/openstack/decommission/decommission-openstack-resources/index.html new file mode 100644 index 00000000..83a50b11 --- /dev/null +++ b/openstack/decommission/decommission-openstack-resources/index.html @@ -0,0 +1,4960 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Decommission Your NERC OpenStack Resources

+

You can decommission all of your NERC OpenStack resources sequentially as outlined +below.

+

Prerequisite

+
    +
  • +

    Backup: Back up any critical data or configurations stored on the resources that are going to be decommissioned. This ensures that important information is not lost during the process. You can refer to this guide to initiate and carry out data transfer to and from the virtual machine.

    +
  • +
  • +

    Shutdown Instances: If applicable, Shut Off any running instances + to ensure they are not actively processing data during decommissioning.

    +
  • +
  • +

    Setup OpenStack CLI, see OpenStack Command Line setup + for more information.

    +
  • +
+

Delete all VMs

+

For instructions on deleting instance(s), please refer to this documentation.

+
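If you prefer the command line (using the OpenStack CLI set up in the prerequisites), a sketch of the equivalent steps is shown below; <server-name-or-id> is a placeholder for each of your own instances:

openstack server list
openstack server delete <server-name-or-id>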

Delete volumes and snapshots

+

For instructions on deleting volume(s), please refer to this documentation.

+

To delete snapshot(s), provided the snapshot is not in use by any running instance:

+

Navigate to Project -> Volumes -> Snapshots.

+

Delete Snapshots

+
+

Unable to Delete Snapshots

+

You must first delete all volumes and instances (and their attached volumes) that were created using the snapshot; otherwise, you will not be able to delete the volume snapshots.

+
+
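A command-line sketch of the same cleanup, assuming the OpenStack CLI is configured (delete snapshots before the volumes they were taken from):

openstack volume snapshot list
openstack volume snapshot delete <snapshot-name-or-id>
openstack volume list
openstack volume delete <volume-name-or-id>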

Delete all custom built Images and Instance Snapshot built Images

+

Navigate to Project -> Compute -> Images.

+

Select all of the custom-built images that have Visibility set to "Private" and delete them.

+
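From the command line, a sketch for listing and removing your private images:

openstack image list --private
openstack image delete <image-name-or-id>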

Delete your all private Networks, Routers and Internal Interfaces on the Routers

+

To review all Networks and their connectivity, you need to:

+

Navigate to Project -> Network -> Network Topology.

+

This shows a complete view of the current Networks in your project in Graph or Topology view. Make sure no instances are connected to your private network, which was set up by following this documentation. If there are any instances, refer to this to delete those VMs.

+

Network Topology

+

First, delete all other Routers used to create private networks, which were set up by following this documentation, except default_router, from:

+

Navigate to Project -> Network -> Routers.

+

Next, delete all other Networks except default_network and provider; only then will you be able to delete the Networks from:

+

Navigate to Project -> Network -> Networks.

+
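For reference, a minimal openstack client sketch following the same order of operations (the router, subnet, and network names below are placeholders) is:

openstack router list
+# detach the internal interface (subnet) from the router first
+openstack router remove subnet my-router my-subnet
+openstack router delete my-router
+openstack network delete my-private-network
+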
+

Unable to Delete Networks

+

First delete all instances, then delete all routers; only then will you be able to delete the associated private networks.

+
+

Release all Floating IPs

+

Navigate to Project -> Network -> Floating IPs.

+

Release all Floating IPs

+

For instructions on releasing your allocated Floating IP back into the NERC floating +IP pool, please refer to this documentation.

+
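From the command line, releasing a Floating IP corresponds to deleting it from your project; a minimal sketch (the IP shown is a placeholder) is:

openstack floating ip list
+openstack floating ip delete 199.94.60.4
+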

Clean up all added Security Groups

+

First, delete all other security groups except default, and also make sure the default security group does not have any extra rules. To view all Security Groups:

+

Navigate to Project -> Network -> Security Groups.

+
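A corresponding openstack client sketch (assuming "ssh_only" stands in for any non-default security group you created) is:

openstack security group list
+openstack security group delete ssh_only
+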
+

Unable to Delete Security Groups

+

First delete all instances; only then will you be able to delete the security groups. If a security group is attached to a VM, you will not be allowed to delete that security group.

+
+

Delete all of your stored Key Pairs

+

Navigate to Project -> Compute -> Key Pairs.

+
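The same can be done with the openstack client; a minimal sketch (with "cloud_key" as a placeholder key pair name) is:

openstack keypair list
+openstack keypair delete cloud_key
+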
+

Unable to Delete Key Pairs

+

First delete all instances that are using the selected Key Pairs; only then will you be able to delete them.

+
+

Delete all buckets and objects

+

For instructions on deleting bucket(s) along with all objects, please refer to +this documentation.

+

To delete container(s) along with all objects stored inside them using the Horizon dashboard:

+

Navigate to Project -> Object Store -> Containers.

+

Delete Containers

+
+

Unable to Delete Container with Objects inside

+

First delete all objects inside a Container; only then will you be able to delete the container. Please make sure any critical object data has already been backed up remotely before deleting it. You can also use the openstack client to recursively delete containers that have multi-level objects inside, as described here, so you don't need to manually delete all objects inside a container before deleting the container. This will save a lot of your time and effort.
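For reference, a minimal sketch of that recursive deletion with the openstack client (the container name below is a placeholder) is:

openstack container delete --recursive my-container
+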

+
+

Use ColdFront to reduce the Storage Quota to Zero

+

Each allocation, whether requested or approved, will be billed based on the +pay-as-you-go model. The exception is for Storage quotas, where the cost +is determined by your requested and approved allocation values +to reserve storage from the total NESE storage pool. For NERC (OpenStack) +Resource Allocations, storage quotas are specified by the "OpenStack Volume Quota +(GiB)" and "OpenStack Swift Quota (GiB)" allocation attributes.

+

Even if you have deleted all volumes, snapshots, and object storage buckets and objects in your OpenStack project, it is essential to adjust the approved values for your NERC (OpenStack) resource allocations to zero (0); otherwise, you will still incur charges for the approved storage, as explained in Billing FAQs.

+

To achieve this, you must submit a final change request to reduce the +Storage Quotas for the "OpenStack Volume Quota (GiB)" and "OpenStack Swift Quota +(GiB)" to zero (0) for your NERC (OpenStack) resource type. You can review +and manage these resource allocations by visiting the +resource allocations. Here, you +can filter the allocation of your interest and then proceed to request a +change request.

+

Please make sure your change request looks like this:

+

Change Request to Set Storage Quotas Zero

+

Wait until the requested resource allocation gets approved by the NERC admin.

+

After approval, kindly review and verify that the quotas are accurately +reflected in your resource allocation +and OpenStack project. Please ensure that the +approved quota values are accurately displayed as explained here.

+

Review your Block Storage(Volume/Cinder) Quota

+

Please confirm and verify that the gigabytes resource value that specifies total +space in external volumes is set to +a limit of zero (0) in correspondence with the approved "OpenStack Volume Quota (GiB)" +of your allocation when running openstack quota show openstack client command +as shown below:

+
openstack quota show
++-----------------------+--------+
+| Resource              |  Limit |
++-----------------------+--------+
+...
+| gigabytes             |      0 |
+...
++-----------------------+--------+
+
+

Review your Object Storage(Swift) Quota

+

To check the overall space used, you can use the following command:

+

Also, please confirm and verify that the Quota-Bytes property value is set to one (1), in correspondence with the approved "OpenStack Swift Quota (GiB)" of zero (0) for your allocation, and also check that the overall space used in Bytes is zero (0), along with no Containers and Objects, when running the openstack object store account show openstack client command as shown below:

+
openstack object store account show
++------------+---------------------------------------+
+| Field      | Value                                 |
++------------+---------------------------------------+
+| Account    | AUTH_5e1cbcfe729a4c7e8fb2fd5328456eea |
+| Bytes      | 0                                     |
+| Containers | 0                                     |
+| Objects    | 0                                     |
+| properties | Quota-Bytes='1'                       |
++------------+---------------------------------------+
+
+

Review your Project Usage

+

Several commands are available to access project-level resource utilization details. +The openstack limits show --absolute command offers a comprehensive view of the +most crucial resources and also allows you to view your current resource consumption.

+


+
+

Very Important: Ensure No Resources that will be Billed are Used

+

Most importantly, ensure that there is no active usage for any of your +currently allocated project resources.

+
+

Please ensure the output appears as follows, with all used resources having a value +of zero (0), except for totalSecurityGroupsUsed.

+
openstack limits show --absolute
++--------------------------+-------+
+| Name                     | Value |
++--------------------------+-------+
+...
+| totalRAMUsed             |     0 |
+| totalCoresUsed           |     0 |
+| totalInstancesUsed       |     0 |
+| totalFloatingIpsUsed     |     0 |
+| totalSecurityGroupsUsed  |     1 |
+| totalServerGroupsUsed    |     0 |
+...
+| totalVolumesUsed         |     0 |
+| totalGigabytesUsed       |     0 |
+| totalSnapshotsUsed       |     0 |
+| totalBackupsUsed         |     0 |
+| totalBackupGigabytesUsed |     0 |
++--------------------------+-------+
+
+

Review your Project's Resource Quota from the OpenStack Dashboard

+

After removing all OpenStack resources and updating the Storage Quotas to set them +to zero (0), you can review and verify that these changes are reflected in your +Horizon Dashboard Overview.

+

Navigate to Project -> Compute -> Overview.

+

Horizon Dashboard

+

Finally, Archive your ColdFront Project

+

As a PI, you will now be able to Archive your ColdFront Project via accessing NERC's ColdFront interface. Please refer to these instructions on how to archive your projects that need to be decommissioned.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/decommission/images/change_request_zero_storage.png b/openstack/decommission/images/change_request_zero_storage.png new file mode 100644 index 00000000..0089929f Binary files /dev/null and b/openstack/decommission/images/change_request_zero_storage.png differ diff --git a/openstack/decommission/images/delete-containers.png b/openstack/decommission/images/delete-containers.png new file mode 100644 index 00000000..f3ba7789 Binary files /dev/null and b/openstack/decommission/images/delete-containers.png differ diff --git a/openstack/decommission/images/delete-snapshots.png b/openstack/decommission/images/delete-snapshots.png new file mode 100644 index 00000000..af39799e Binary files /dev/null and b/openstack/decommission/images/delete-snapshots.png differ diff --git a/openstack/decommission/images/horizon_dashboard.png b/openstack/decommission/images/horizon_dashboard.png new file mode 100644 index 00000000..7d26c241 Binary files /dev/null and b/openstack/decommission/images/horizon_dashboard.png differ diff --git a/openstack/decommission/images/instance_change_security_groups.png b/openstack/decommission/images/instance_change_security_groups.png new file mode 100644 index 00000000..5437f8a4 Binary files /dev/null and b/openstack/decommission/images/instance_change_security_groups.png differ diff --git a/openstack/decommission/images/network-topology.png b/openstack/decommission/images/network-topology.png new file mode 100644 index 00000000..6c4d4163 Binary files /dev/null and b/openstack/decommission/images/network-topology.png differ diff --git a/openstack/decommission/images/release_floating_ips.png b/openstack/decommission/images/release_floating_ips.png new file mode 100644 index 00000000..06ff1f9b Binary files /dev/null and b/openstack/decommission/images/release_floating_ips.png differ diff --git a/openstack/index.html b/openstack/index.html new file mode 100644 index 00000000..173b0c85 --- /dev/null +++ b/openstack/index.html @@ -0,0 +1,4887 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

OpenStack Tutorial Index

+

If you're just starting out, we recommend starting from

+

Access the OpenStack Dashboard +and going through the tutorial in order.

+

If you just need to review a specific step, you can find the page you need in +the list below.

+

Logging In

+ +

Access and Security

+ +

Create & Connect to the VM

+ +

OpenStack CLI

+ +

Persistent Storage

+

Block Storage/ Volumes/ Cinder

+ +

Object Storage/ Swift

+ +

Data Transfer

+ +

Backup your instance and data

+ +

VM Management

+ +

Decommission OpenStack Resources

+ +
+

Advanced OpenStack Topics

+
+

Setting Up Your Own Network

+ +

Domain or Host Name for your VM

+ +

Using Terraform to provision NERC resources

+ +

Python SDK

+ +

Setting Up Your Own Images

+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/logging-in/access-the-openstack-dashboard/index.html b/openstack/logging-in/access-the-openstack-dashboard/index.html new file mode 100644 index 00000000..b24b3cea --- /dev/null +++ b/openstack/logging-in/access-the-openstack-dashboard/index.html @@ -0,0 +1,4500 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Access the OpenStack Dashboard

+

The OpenStack Dashboard, a web-based graphical interface code-named Horizon, is located at https://stack.nerc.mghpcc.org.

+

The NERC Authentication supports CILogon using Keycloak for gateway authentication and authorization, which provides federated login via your institution's accounts and is the recommended authentication method.

+

Make sure you are selecting "OpenID Connect" (which is selected by default) as +shown here:

+

OpenID Connect

+

Next, you will be redirected to CILogon welcome page as shown below:

+

CILogon Welcome Page

+

MGHPCC Shared Services (MSS) Keycloak will request approval of access to the +following information from the user:

+
    +
  • +

    Your CILogon user identifier

    +
  • +
  • +

    Your name

    +
  • +
  • +

    Your email address

    +
  • +
  • +

    Your username and affiliation from your identity provider

    +
  • +
+

which are required in order to allow access to your account on NERC's OpenStack dashboard.

+

From the "Selected Identity Provider" dropdown option, please select your institution's +name. If you would like to remember your selected institution name for future +logins please check the "Remember this selection" checkbox this will bypass the +CILogon welcome page on subsequent visits and proceed directly to the selected insitution's +identity provider(IdP). Click "Log On". This will redirect to your respective institutional +login page where you need to enter your institutional credentials.

+
+

Important Note

+

The NERC does not see or have access to your institutional account credentials; it points to your selected institution's identity provider and redirects back once you are authenticated.

+
+

Once you successfully authenticate, you should see an overview of resources such as Compute (instances, VCPUs, RAM, etc.), Volume, and Network. You can also see a usage summary for a provided date range.

+

OpenStack Horizon dashboard

+
+

I can't find my virtual machine

+

If you are a member of several projects, i.e. ColdFront NERC (OpenStack) allocations, you may need to switch projects before you can see and use the OpenStack resources you or your team have created. Clicking on the project dropdown, which is displayed near the top right side, will pop up the list of projects you are in. You can select the new project by hovering over and clicking on the project name in that list, as shown below:

+

OpenStack Project List

+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/logging-in/dashboard-overview/index.html b/openstack/logging-in/dashboard-overview/index.html new file mode 100644 index 00000000..ddaf9ecd --- /dev/null +++ b/openstack/logging-in/dashboard-overview/index.html @@ -0,0 +1,4695 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Dashboard Overview

+

When you are logged in, you will be redirected to the Compute panel, which is under the Project tab. In the top bar, you can see two small tabs: "Project" and "Identity".

+

Beneath that you can see six panels in larger print: "Project", "Compute", +"Volumes", "Network", "Orchestration", and "Object Store".

+

Project Panel

+

Navigate: Project -> Project

+
    +
  • API Access: View API endpoints.
  • +
+

Project API Access

+

Compute Panel

+

Navigate: Project -> Compute

+
    +
  • Overview: View reports for the project.
  • +
+

Compute dashboard

+
    +
  • +

    Instances: View, launch, create a snapshot from, stop, pause, or reboot + instances, or connect to them through VNC.

    +
  • +
  • +

    Images: View images and instance snapshots created by project users, plus any + images that are publicly available. Create, edit, and delete images, and launch + instances from images and snapshots.

    +
  • +
  • +

    Key Pairs: View, create, edit, import, and delete key pairs.

    +
  • +
  • +

    Server Groups: View, create, edit, and delete server groups.

    +
  • +
+

Volume Panel

+

Navigate: Project -> Volume

+
    +
  • +

    Volumes: View, create, edit, and delete volumes, and accept volume transfers.

    +
  • +
  • +

    Backups: View, create, edit, and delete backups.

    +
  • +
  • +

    Snapshots: View, create, edit, and delete volume snapshots.

    +
  • +
  • +

    Groups: View, create, edit, and delete groups.

    +
  • +
  • +

    Group Snapshots: View, create, edit, and delete group snapshots.

    +
  • +
+

Network Panel

+

Navigate: Project -> Network

+
    +
  • Network Topology: View the network topology.
  • +
+

Network Topology

+
    +
  • +

    Networks: Create and manage public and private networks.

    +
  • +
  • +

    Routers: Create and manage routers.

    +
  • +
  • +

    Security Groups: View, create, edit, and delete security groups and security group rules.

    +
  • +
  • +

    Load Balancers: View, create, edit, and delete load balancers.

    +
  • +
  • +

    Floating IPs: Allocate an IP address to or release it from a project.

    +
  • +
  • +

    Trunks: View, create, edit, and delete trunks.

    +
  • +
+

Orchestration Panel

+

Navigate: Project -> Orchestration

+
    +
  • +

    Stacks: Use the REST API to orchestrate multiple composite cloud applications.

    +
  • +
  • +

    Resource Types: View various resource types and their details.

    +
  • +
  • +

    Template Versions: View different Heat templates.

    +
  • +
  • +

    Template Generator: GUI to generate and save templates using drag and drop resources.

    +
  • +
+

Object Store Panel

+

Navigate: Project -> Object Store

+
    +
  • Containers: Create and manage containers and objects. In the future, you would use this tab to create Swift object storage for your projects on an as-needed basis.
  • +
+

Swift Object Containers

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/logging-in/images/CILogon_interface.png b/openstack/logging-in/images/CILogon_interface.png new file mode 100644 index 00000000..fd1c073f Binary files /dev/null and b/openstack/logging-in/images/CILogon_interface.png differ diff --git a/openstack/logging-in/images/horizon_dashboard.png b/openstack/logging-in/images/horizon_dashboard.png new file mode 100644 index 00000000..4814d2ba Binary files /dev/null and b/openstack/logging-in/images/horizon_dashboard.png differ diff --git a/openstack/logging-in/images/network_topology.png b/openstack/logging-in/images/network_topology.png new file mode 100644 index 00000000..1f98944f Binary files /dev/null and b/openstack/logging-in/images/network_topology.png differ diff --git a/openstack/logging-in/images/object_containers.png b/openstack/logging-in/images/object_containers.png new file mode 100644 index 00000000..5b76ca81 Binary files /dev/null and b/openstack/logging-in/images/object_containers.png differ diff --git a/openstack/logging-in/images/openstack_login.png b/openstack/logging-in/images/openstack_login.png new file mode 100644 index 00000000..ea48c9f7 Binary files /dev/null and b/openstack/logging-in/images/openstack_login.png differ diff --git a/openstack/logging-in/images/openstack_project_list.png b/openstack/logging-in/images/openstack_project_list.png new file mode 100644 index 00000000..74a5878c Binary files /dev/null and b/openstack/logging-in/images/openstack_project_list.png differ diff --git a/openstack/logging-in/images/project_API_access.png b/openstack/logging-in/images/project_API_access.png new file mode 100644 index 00000000..b2d4c8a9 Binary files /dev/null and b/openstack/logging-in/images/project_API_access.png differ diff --git a/openstack/management/images/delete_multiple_instances.png b/openstack/management/images/delete_multiple_instances.png new file mode 100644 index 00000000..f7ccec88 Binary files /dev/null and b/openstack/management/images/delete_multiple_instances.png differ diff --git a/openstack/management/images/edit_instance.png b/openstack/management/images/edit_instance.png new file mode 100644 index 00000000..b0cc3cd2 Binary files /dev/null and b/openstack/management/images/edit_instance.png differ diff --git a/openstack/management/images/edit_instance_to_rename.png b/openstack/management/images/edit_instance_to_rename.png new file mode 100644 index 00000000..f03b86a9 Binary files /dev/null and b/openstack/management/images/edit_instance_to_rename.png differ diff --git a/openstack/management/images/instance_actions.png b/openstack/management/images/instance_actions.png new file mode 100644 index 00000000..5ddbae08 Binary files /dev/null and b/openstack/management/images/instance_actions.png differ diff --git a/openstack/management/images/rescue_instance_popup.png b/openstack/management/images/rescue_instance_popup.png new file mode 100644 index 00000000..e90d550c Binary files /dev/null and b/openstack/management/images/rescue_instance_popup.png differ diff --git a/openstack/management/vm-management/index.html b/openstack/management/vm-management/index.html new file mode 100644 index 00000000..8036ef1d --- /dev/null +++ b/openstack/management/vm-management/index.html @@ -0,0 +1,4989 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

VM Management

+

RedHat OpenStack offers numerous functionalities for handling virtual machines, and comprehensive information can be found in the official OpenStack site user guide. Please keep in mind that certain features may not be fully implemented on NERC OpenStack.

+

Instance Management Actions

+

After launching an instance (On the left side bar, click on +Project -> Compute -> Instances), several options are available under the +Actions menu located on the right hand side of your screen as shown here:

+

Instance Management Actions

+

Renaming VM

+

Once a VM is created, its name is set based on the user-specified Instance Name provided while launching an instance using the Horizon dashboard, or specified in the openstack server create ... command when using the openstack client.

+

To rename a VM, navigate to Project -> Compute -> Instances.

+

Select an instance.

+

In the menu list in the actions column, select "Edit Instance" by clicking on +the arrow next to "Create Snapshot" as shown below:

+

Edit Instance to Rename

+

Then edit the Name and also the Description (optional) in the "Information" tab and save it:

+

Edit Instance

+

Stopping and Starting

+

Virtual machines can be stopped and started using various methods, and these actions are executed through the openstack command with the relevant parameters.

+
    +
  1. +

    Reboot is equivalent to powering down the machine and then restarting it. A + complete boot sequence takes place and thus the machine returns to use in a few + minutes.

    +

    Soft Reboot:

    +
      +
    • +

      A soft reboot attempts a graceful shut down and restart of the instance. It + sends an ACPI Restart request to the VM. Similar to sending a reboot command + to a physical computer.

      +
    • +
    • +

      Click Action -> Soft Reboot Instance.

      +
    • +
    • +

      Status will change to Reboot.

      +
    • +
    +

    Hard Reboot:

    +
      +
    • +

      A hard reboot power cycles the instance. This forcibly restarts your VM. Similar to cycling the power on a physical computer.

      +
    • +
    • +

      Click Action -> Hard Reboot Instance.

      +
    • +
    • +

      Status will change to Hard Reboot.

      +
    • +
    +
  2. +
  3. +

    The Pause & Resume feature enables the temporary suspension of the VM. While + in this state, the VM is retained in memory but doesn't receive any allocated + CPU time. This proves handy when conducting interventions on a group of servers, + preventing the VM from processing during the intervention.

    +
      +
    • +

      Click Action -> Pause Instance.

      +
    • +
    • +

      Status will change to Paused.

      +
    • +
    • +

      The Resume operation typically completes in less than a second by clicking + Action -> Resume Instance.

      +
    • +
    +
  4. +
  5. +

    The Suspend & Resume function saves the VM onto disk and swiftly restores + it (in less than a minute). This process is quicker than the stop/start method, + and the VM resumes from where it was suspended, avoiding a new boot cycle.

    +
      +
    • +

      Click Action -> Suspend Instance.

      +
    • +
    • +

      Status will change to Suspended.

      +
    • +
    • +

      The Resume operation typically completes in less than a second by clicking + Action -> Resume Instance.

      +
    • +
    +
  6. +
  7. +

    Shelve & Unshelve

    +
      +
    • +

      Click Action -> Shelve Instance.

      +
    • +
    • +

      When shelved, it stops all computing and stores a snapshot of the instance. The shelved instances are already imaged as part of the shelving process and appear in Project -> Compute -> Images as "_shelved".

      +
    • +
    • +

      We strongly recommend detaching volumes before shelving.

      +
    • +
    • +

      Status will change to Shelved Offloaded.

      +
    • +
    • +

      To unshelve the instance, click Action -> Unshelve Instance.

      +
    • +
    +
  8. +
  9. +

    Shut Off & Start Instance

    +
      +
    • +

      Click Action -> Shut Off Instance.

      +
    • +
    • +

      When shut off, it stops active computing, consuming fewer resources than a Suspend.

      +
    • +
    • +

      Status will change to Shutoff.

      +
    • +
    • +

      To start the shut down VM, click Action -> Start Instance.

      +
    • +
    +
  10. +
+

Using openstack client commands

+

The above mentioned actions can all be performed by running the openstack client commands with the following syntax:

+
openstack server <operation> <INSTANCE_NAME_OR_ID>
+
+

such as,

+
openstack server stop my-vm
+
+openstack server reboot my-vm
+
+
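For completeness, the other actions described above also map to openstack client subcommands; a minimal sketch (using "my-vm" as a placeholder) is:

openstack server pause my-vm
+openstack server unpause my-vm
+openstack server suspend my-vm
+openstack server resume my-vm
+openstack server shelve my-vm
+openstack server unshelve my-vm
+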
+

Pro Tip

+

If your instance name <INSTANCE_NAME_OR_ID> includes spaces, you need to +enclose the name of your instance in quotes, i.e. "<INSTANCE_NAME_OR_ID>"

+

For example: openstack server reboot "My Test Instance".

+
+

Create Snapshot

+
    +
  • +

    Click Action -> Create Snapshot.

    +
  • +
  • +

    Instances must have a status of Active, Suspended, or Shutoff to create a snapshot.

    +
  • +
  • +

    This creates an image template from a VM instance, also known as an "Instance Snapshot", as described here. A CLI sketch for this action is shown after this list.

    +
  • +
  • +

    The menu will automatically shift to Project -> Compute -> Images once the + image is created.

    +
  • +
  • +

    The sole distinction between an image directly uploaded to the image data service, Glance, and an image generated through a snapshot is that the snapshot-created image possesses additional properties in the glance database and defaults to being private.

    +
  • +
+
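As referenced in the list above, a minimal openstack client sketch for creating an instance snapshot (the snapshot and server names are placeholders) is:

openstack server image create --name my-vm-snapshot my-vm
+openstack image list --private
+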
+

Glance Image Service

+

Glance is a central image repository which provides discovering, registering, and retrieving of disk and server images. More about this service can be found here.

+
+

Rescue a VM

+

There are instances where a virtual machine may encounter boot failure due to +reasons like misconfiguration or issues with the system disk. To diagnose and +address the problem, the virtual machine console offers valuable diagnostic +information on the underlying cause.

+

Alternatively, utilizing OpenStack's rescue functions involves booting the +virtual machine using the original image, with the system disk provided as a +secondary disk. This allows manipulation of the disk, such as using fsck to +address filesystem issues or mounting and editing the configuration.

+
+

Important Note

+

We cannot rescue a volume-backed instance; that means ONLY instances running using an Ephemeral disk can be rescued. Also, this procedure has not been tested for Windows virtual machines.

+
+

VMs can be rescued using either the OpenStack dashboard by clicking +Action -> Rescue Instance or via the openstack client using +openstack server rescue ... command.

+

To rescue the virtual machine via the openstack client, the following command can be run:

+
openstack server rescue <INSTANCE_NAME_OR_ID>
+
+

or, using Horizon dashboard:

+

Navigate to Project -> Compute -> Instances.

+

Select an instance.

+

Click Action -> Rescue Instance.

+
+

When to use Rescue Instance

+

The rescue mode is only for emergency purposes, for example in case of a system or access failure. This will shut down your instance and mount the root disk to a temporary server. Then, you will be able to connect to this server, repair the system configuration or recover your data. You may optionally select an image and set a password on the rescue instance server.

+
+

Rescue Instance Popup

+

Troubleshoot the disk

+

This will reboot the virtual machine, and you can then log in using the key pair previously defined. You will see two disks: /dev/vda, which is the new system disk, and /dev/vdb, which is the old one to be repaired.

+
ubuntu@my-vm:~$ lsblk
+NAME    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINTS
+loop0     7:0    0   62M  1 loop /snap/core20/1587
+loop1     7:1    0 79.9M  1 loop /snap/lxd/22923
+loop2     7:2    0   47M  1 loop /snap/snapd/16292
+vda     252:0    0  2.2G  0 disk
+├─vda1  252:1    0  2.1G  0 part /
+├─vda14 252:14   0    4M  0 part
+└─vda15 252:15   0  106M  0 part /boot/efi
+vdb     252:16   0   20G  0 disk
+├─vdb1  252:17   0 19.9G  0 part
+├─vdb14 252:30   0    4M  0 part
+└─vdb15 252:31   0  106M  0 part
+
+

The old one can be mounted and configuration files edited or fsck'd.

+
# lsblk
+# cat /proc/diskstats
+# mkdir /tmp/rescue
+# mount /dev/vdb1 /tmp/rescue
+
+

Unrescue the VM

+

On completion, the VM can be returned to the active state with the openstack server unrescue ... openstack client command, and rebooted.

+
openstack server unrescue <INSTANCE_NAME_OR_ID>
+
+

Then the secondary disk is removed as shown below:

+
ubuntu@my-vm:~$ lsblk
+NAME    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINTS
+loop0     7:0    0   47M  1 loop /snap/snapd/16292
+vda     252:0    0   20G  0 disk
+├─vda1  252:1    0 19.9G  0 part /
+├─vda14 252:14   0    4M  0 part
+└─vda15 252:15   0  106M  0 part /boot/efi
+
+

Alternatively, using Horizon dashboard:

+

Navigate to Project -> Compute -> Instances.

+

Select an instance.

+

Click Action -> Unrescue Instance.

+

And then Action -> Soft Reboot Instance.

+

Delete Instance

+

VMs can be deleted using either the OpenStack dashboard by clicking +Action -> Delete Instance or via the openstack client openstack server delete +command.

+
+

How can I delete multiple instances at once?

+

Using the Horizon dashboard, navigate to Project -> Compute -> Instances. In the Instances panel, you should see a list of all instances running in your project. Select the instances you want to delete by ticking the checkboxes next to their names. Then, click on the "Delete Instances" button located on the top right side, as shown below:

+

Delete Multiple Instances At Once

+
+
+

Important Note

+

This will immediately terminate the instance, delete all contents of the +virtual machine and erase the disk. This operation is not recoverable.

+
+

There are other options available if you wish to keep the virtual machine for +future usage. These do, however, continue to use quota for the project even though +the VM is not running.

+
    +
  • Snapshot the VM to keep an offline copy of the virtual machine; this can be performed as described here.
  • +
+

If however, the virtual machine is no longer required and no data on the +associated system or ephemeral disk needs to be preserved, the following command +can be run:

+
openstack server delete <INSTANCE_NAME_OR_ID>
+
+

or, using Horizon dashboard:

+

Navigate to Project -> Compute -> Instances.

+

Select an instance.

+

Click Action -> Delete Instance.

+
+

Important Note: Unmount volumes first

+

Ensure to unmount any volumes attached to your instance before initiating +the deletion process, as failure to do so may lead to data corruption in +both your data and the associated volume.

+
+
    +
  • +

    If the instance is using an Ephemeral disk: It stops and removes the instance along with the ephemeral disk. All data will be permanently lost!

    +
  • +
  • +

    If the instance is using a Volume-backed disk: It stops and removes the instance. If "Delete Volume on Instance Delete" was explicitly set to Yes, all data will be permanently lost! If set to No (which is selected by default while launching an instance), the volume may be used to boot a new instance, though any data stored in memory will be permanently lost. For more in-depth information on making your VM setup and data persistent, you can explore the details here.

    +
  • +
  • +

    Status will briefly change to Deleting while the instance is being removed.

    +
  • +
+

The quota associated with this virtual machine will be returned to the project, and you can review and verify that by looking at your OpenStack dashboard overview.

+
    +
  • Navigate to Project -> Compute -> Overview.
  • +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/openstack-cli/images/openstack_cli_cred.png b/openstack/openstack-cli/images/openstack_cli_cred.png new file mode 100644 index 00000000..9656f0dd Binary files /dev/null and b/openstack/openstack-cli/images/openstack_cli_cred.png differ diff --git a/openstack/openstack-cli/launch-a-VM-using-openstack-CLI/index.html b/openstack/openstack-cli/launch-a-VM-using-openstack-CLI/index.html new file mode 100644 index 00000000..c7f96b6b --- /dev/null +++ b/openstack/openstack-cli/launch-a-VM-using-openstack-CLI/index.html @@ -0,0 +1,4920 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Launch a VM using OpenStack CLI

+

First, find the following details using the openstack command; we will require these details during the creation of the virtual machine.

+
    +
  • +

    Flavor

    +
  • +
  • +

    Image

    +
  • +
  • +

    Network

    +
  • +
  • +

    Security Group

    +
  • +
  • +

    Key Name

    +
  • +
+

Get the flavor list using the below openstack command:

+
openstack flavor list
++--------------------------------------+-------------+--------+------+-----------+-------+-----------+
+| ID                                   | Name        |    RAM | Disk | Ephemeral | VCPUs | Is Public |
++--------------------------------------+-------------+--------+------+-----------+-------+-----------+
+| 12ded228-1a7f-4d35-b994-7dd394a6ca90 |gpu-su-a100.2| 196608 |   20 |         0 |    24 | True      |
+| 15581358-3e81-4cf2-a5b8-c0fd2ad771b4 | mem-su.8    |  65536 |   20 |         0 |     8 | True      |
+| 17521416-0ecf-4d85-8d4c-ec6fd1bc5f9d | cpu-su.1    |   2048 |   20 |         0 |     1 | True      |
+| 2b1dbea2-736d-4b85-b466-4410bba35f1e | cpu-su.8    |  16384 |   20 |         0 |     8 | True      |
+| 2f33578f-c3df-4210-b369-84a998d77dac | mem-su.4    |  32768 |   20 |         0 |     4 | True      |
+| 4498bfdb-5342-4e51-aa20-9ee74e522d59 | mem-su.1    |   8192 |   20 |         0 |     1 | True      |
+| 7f2f5f4e-684b-4c24-bfc6-3fce9cf1f446 | mem-su.16   | 131072 |   20 |         0 |    16 | True      |
+| 8c05db2f-6696-446b-9319-c32341a09c41 | cpu-su.16   |  32768 |   20 |         0 |    16 | True      |
+| 9662b5b2-aeaa-4d56-9bd3-450deee668af | cpu-su.4    |   8192 |   20 |         0 |     4 | True      |
+| b3377fdd-fd0f-4c88-9b4b-3b5c8ada0732 |gpu-su-a100.1|  98304 |   20 |         0 |    12 | True      |
+| e9125ab0-c8df-4488-a252-029c636cbd0f | mem-su.2    |  16384 |   20 |         0 |     2 | True      |
+| ee6417bd-7cd4-4431-a6ce-d09f0fba3ba9 | cpu-su.2    |   4096 |   20 |         0 |     2 | True      |
++--------------------------------------+------------+--------+------+-----------+-------+------------+
+
+

Get the image name and its ID:

+
openstack image list  | grep almalinux-9
+| 263f045e-86c6-4344-b2de-aa475dbfa910 | almalinux-9-x86_64  | active |
+
+

Get Private Virtual network details, which will be attached to the VM:

+
openstack network list
++--------------------------------------+-----------------+--------------------------------------+
+| ID                                   | Name            | Subnets                              |
++--------------------------------------+-----------------+--------------------------------------+
+| 43613b84-e1fb-44a4-b1ea-c530edc49018 | provider        | 1cbbb98d-3b57-4f6d-8053-46045904d910 |
+| 8a91900b-d43c-474d-b913-930283e0bf43 | default_network | e62ce2fd-b11c-44ce-b7cc-4ca943e75a23 |
++--------------------------------------+-----------------+--------------------------------------+
+
+

Find the Security Group:

+
openstack security group list
++--------------------------------------+----------------------------------+-----------------------+----------------------------------+------+
+| ID                                   | Name                             | Description            |Project                          | Tags |
++--------------------------------------+----------------------------------+-----------------------+----------------------------------+------+
+| 8285530a-34e3-4d96-8e01-a7b309a91f9f | default                          | Default security group |8ae3ae25c3a84c689cd24c48785ca23a | []   |
+| bbb738d0-45fb-4a9a-8bc4-a3eafeb49ba7 | ssh_only                         |                        |8ae3ae25c3a84c689cd24c48785ca23a | []   |
++--------------------------------------+----------------------------------+-----------------------+----------------------------------+------+
+
+

Find the Key pair; in this case it is cloud_key, but you can choose your own:

+
openstack keypair list | grep -i cloud_key
+| cloud_key | d5:ab:dc:1f:e5:08:44:7f:a6:21:47:23:85:32:cc:04 | ssh  |
+
+
+

Note

+

The above details will be different for you, based on your project and environment.

+
+

Launch an instance from an Image

+

Now that we have all the details, let's create a virtual machine using the "openstack server create" command.

+

Syntax :

+
openstack server create --flavor {Flavor-Name-Or-Flavor-ID } \
+    --image {Image-Name-Or-Image-ID} \
+    --nic net-id={Network-ID} \
+    --user-data USER-DATA-FILE \
+    --security-group {Security_Group_ID} \
+    --key-name {Keypair-Name} \
+    --property KEY=VALUE \
+    <Instance_Name>
+
+
+

Important Note

+

If you boot an instance with an "Instance_Name" greater than 63 +characters, Compute truncates it automatically when turning it into a +hostname to ensure the correct functionality of dnsmasq.

+
+

Optionally, you can provide a key name for access control and a security group +for security.

+

You can also include metadata key and value pairs: --property KEY=VALUE. For example, you can add a description for your server by providing the --property description="My Server" parameter.

+

You can pass user data in a local file at instance launch by using the +--user-data USER-DATA-FILE parameter. If you do not provide a key pair, you +will be unable to access the instance.

+

You can also place arbitrary local files into the instance file system at creation time by using the --file <dest-filename=source-filename> parameter. You can store up to five files. For example, if you have a special authorized keys file named special_authorized_keysfile that you want to put on the instance rather than using the regular SSH key injection, you can add the --file option as shown in the following example.

+
--file /root/.ssh/authorized_keys=special_authorized_keysfile
+
+

To create a VM on a specific "Availability Zone and Compute Host", specify --availability-zone {Availability-Zone-Name}:{Compute-Host} in the above syntax.

+

Example:

+
openstack server create --flavor cpu-su.2 \
+    --image almalinux-8-x86_64 \
+    --nic net-id=8ee63932-464b-4999-af7e-949190d8fe93 \
+    --security-group default \
+    --key-name cloud_key \
+    --property description="My Server" \
+    my-vm
+
+

NOTE: To get more help on the "openstack server create" command, use:

+
openstack server create -h
+
+

Detailed syntax:

+
openstack server create
+  (--image <image> | --volume <volume>)
+  --flavor <flavor>
+  [--security-group <security-group>]
+  [--key-name <key-name>]
+  [--property <key=value>]
+  [--file <dest-filename=source-filename>]
+  [--user-data <user-data>]
+  [--availability-zone <zone-name>]
+  [--block-device-mapping <dev-name=mapping>]
+  [--nic <net-id=net-uuid,v4-fixed-ip=ip-addr,v6-fixed-ip=ip-addr,port-id=port-uuid,auto,none>]
+  [--network <network>]
+  [--port <port>]
+  [--hint <key=value>]
+  [--config-drive <config-drive-volume>|True]
+  [--min <count>]
+  [--max <count>]
+  [--wait]
+  <server-name>
+
+
+

Note

+

Similarly, we can launch a VM using a bootable "Volume" as described here.

+
+

Now verify that the test VM "my-vm" is running (status "ACTIVE") using the following commands:

+
openstack server list | grep my-vm
+
+

OR,

+
openstack server show my-vm
+
+

Check console of virtual machine

+

The console for a Linux VM can be displayed using console log.

+
openstack console log show --line 20 my-vm
+
+

Associating a Floating IP to VM

+

To associate a Floating IP to the VM, first get an unused Floating IP using the following command:

+
openstack floating ip list | grep None | head -2
+| 071f08ac-cd10-4b89-aee4-856ead8e3ead | 169.144.107.154 | None |
+None                                 |
+| 1baf4232-9cb7-4a44-8684-c604fa50ff60 | 169.144.107.184 | None |
+None                                 |
+
+

Now Associate the first IP to the server using the following command:

+
openstack server add floating ip my-vm 169.144.107.154
+
+

Use the following command to verify whether the Floating IP is assigned to the VM or not:

+
openstack server list | grep my-vm
+| 056c0937-6222-4f49-8405-235b20d173dd | my-vm | ACTIVE  | ...
+nternal=192.168.15.62, 169.144.107.154 |
+
+

Remove existing floating ip from the VM

+
openstack server remove floating ip <INSTANCE_NAME_OR_ID> <FLOATING_IP_ADDRESS>
+
+

Get all available security groups in your project

+
openstack security group list
++--------------------------------------+----------+-----------------------+----------------------------------+------+
+| 3ca248ac-56ac-4e5f-a57c-777ed74bbd7c | default  | Default security group |
+f01df1439b3141f8b76e68a3b58ef74a | []   |
+| 5cdc5f33-78fc-4af8-bf25-60b8d4e5db2a | ssh_only | Enable SSH access.     |
+f01df1439b3141f8b76e68a3b58ef74a | []   |
++--------------------------------------+----------+-----------------------+----------------------------------+------+
+
+

Add existing security group to the VM

+
openstack server add security group <INSTANCE_NAME_OR_ID> <SECURITY_GROUP>
+
+

Example:

+
openstack server add security group my-vm ssh_only
+
+

Remove existing security group from the VM

+
openstack server remove security group <INSTANCE_NAME_OR_ID> <SECURITY_GROUP>
+
+

Example:

+
openstack server remove security group my-vm ssh_only
+
+

Alternatively, you can use the openstack port unset command to remove the +group from a port:

+
openstack port unset --security-group <SECURITY_GROUP> <PORT>
+
+

Adding volume to the VM

+
openstack server add volume
+  [--device <device>]
+  <INSTANCE_NAME_OR_ID>
+  <VOLUME_NAME_OR_ID>
+
+

Remove existing volume from the VM

+
openstack server remove volume <INSTANCE_NAME_OR_ID> <volume>
+
+

Reboot a virtual machine

+
openstack server reboot my-vm
+
+

Deleting Virtual Machine from Command Line

+
openstack server delete my-vm
+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/openstack-cli/openstack-CLI/index.html b/openstack/openstack-cli/openstack-CLI/index.html new file mode 100644 index 00000000..a5ab1248 --- /dev/null +++ b/openstack/openstack-cli/openstack-CLI/index.html @@ -0,0 +1,4809 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+ +
+
+ + + +
+
+ + + + + + + +

OpenStack CLI

+

References

+

OpenStack Command Line Client(CLI) Cheat Sheet

+

The OpenStack CLI is designed for interactive use. OpenStackClient (aka OSC) is +a command-line client for OpenStack that brings the command set for Compute, +Identity, Image, Object Storage and Block Storage APIs together in a single +shell with a uniform command structure. OpenStackClient is primarily configured +using command line options and environment variables. Most of those settings +can also be placed into a configuration file to simplify managing multiple +cloud configurations. Most global options have a corresponding environment +variable that may also be used to set the value. If both are present, the +command-line option takes priority.

+

It's also possible to call it from a bash script or similar, but typically it +is too slow for heavy scripting use.

+
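As a rough illustration, a small bash sketch that wraps the client (assuming your credentials are already sourced) might look like this:

#!/usr/bin/env bash
+# print each server's ID and name, one per line
+openstack server list -f value -c ID -c Name | while read -r id name; do
+    echo "Server ${name} has ID ${id}"
+done
+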

Command Line setup

+

To use the CLI, you must create an application credential and set the appropriate environment variables.

+

You can download the environment file with the credentials from the OpenStack dashboard.

+
    +
  • +

    Log in to the NERC's OpenStack dashboard, choose + the project for which you want to download the OpenStack RC file.

    +
  • +
  • +

    Navigate to Identity -> Application Credentials.

    +
  • +
  • +

    Click on "Create Application Credential" button and provide a Name and Roles + for the application credential. All other fields are optional and leaving the + "Secret" field empty will set it to autogenerate (recommended).

    +
  • +
+

OpenStackClient Credentials Setup

+
+

Important Note

+

Please note that an application credential is only valid for a single + project, and to access multiple projects you need to create an application + credential for each. You can switch projects by clicking on the project name + at the top right corner and choosing from the dropdown under "Project".

+
+

After clicking "Create Application Credential" button, the ID and +Secret will be displayed and you will be prompted to Download openrc file +or to Download clouds.yaml. Both of these are different methods of +configuring the client for CLI access. Please save the file.

+

Configuration

+

The CLI is configured via environment variables and command-line options as +listed in Authentication.

+

Configuration Files

+

OpenStack RC File

+

Find the file (by default it will be named the same as the application credential name with the suffix -openrc.sh).

+

Source your downloaded OpenStack RC File:

+
source app-cred-<Credential_Name>-openrc.sh
+
+
+

Important Note

+

When you source the file, environment variables are set for your current +shell. The variables enable the openstack client commands to communicate with +the OpenStack services that run in the cloud. This just stores your entry into +the environment variable - there's no validation at this stage. You can inspect +the downloaded file to retrieve the ID and Secret if necessary and see what +other environment variables are set.

+
+

clouds.yaml

+

clouds.yaml is a configuration file that contains everything needed to +connect to one or more clouds. It may contain private information and is +generally considered private to a user.

+

For more information on configuring the OpenStackClient with clouds.yaml +please see the OpenStack documentation.

+
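As a rough sketch, once the downloaded clouds.yaml is placed where the client looks for it (for example ~/.config/openstack/clouds.yaml), you can select a cloud entry by name; the entry name "openstack" below is an assumption about what your file defines:

export OS_CLOUD=openstack
+openstack image list
+# or select the cloud per command:
+openstack --os-cloud openstack image list
+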
+

Install the OpenStack command-line clients

+

For more information on configuring the OpenStackClient please see the +OpenStack documentation.

+

OpenStack Hello World

+

Generally, the OpenStack terminal client offers the following methods:

+
    +
  • +

    list: Lists information about objects currently in the cloud.

    +
  • +
  • +

    show: Displays information about a single object currently in the cloud.

    +
  • +
  • +

    create: Creates a new object in the cloud.

    +
  • +
  • +

    set: Edits an existing object in the cloud.

    +
  • +
+

To test that you have everything configured, try out some commands. The +following command lists all the images available to your project:

+
openstack image list
++--------------------------------------+---------------------+--------+
+| ID                                   | Name                | Status |
++--------------------------------------+---------------------+--------+
+| a9b48e65-0cf9-413a-8215-81439cd63966 | MS-Windows-2022     | active |
+| cfecb5d4-599c-4ffd-9baf-9cbe35424f97 | almalinux-8-x86_64  | active |
+| 263f045e-86c6-4344-b2de-aa475dbfa910 | almalinux-9-x86_64  | active |
+| 41fa5991-89d5-45ae-8268-b22224c772b2 | debian-10-x86_64    | active |
+| 99194159-fcd1-4281-b3e1-15956c275692 | fedora-36-x86_64    | active |
+| 74a33f77-fc42-4dd1-a5a2-55fb18fc50cc | rocky-8-x86_64      | active |
+| d7d41e5f-58f4-4ba6-9280-7fef9ac49060 | rocky-9-x86_64      | active |
+| 75a40234-702b-4ab7-9d83-f436b05827c9 | ubuntu-18.04-x86_64 | active |
+| 8c87cf6f-32f9-4a4b-91a5-0d734b7c9770 | ubuntu-20.04-x86_64 | active |
+| da314c41-19bf-486a-b8da-39ca51fd17de | ubuntu-22.04-x86_64 | active |
++--------------------------------------+---------------------+--------+
+
+

If you have launched some instances already, the following command shows a list +of your project's instances:

+
openstack server list --fit-width
++--------------------------------------+------------------+--------+----------------------------------------------+--------------------------+--------------+
+| ID                                   | Name             | Status | Networks                                     | Image                    |  Flavor      |
++--------------------------------------+------------------+--------+----------------------------------------------+--------------------------+--------------+
+| 1c96ba49-a20f-4c88-bbcf-93e2364365f5 |    vm-test       | ACTIVE | default_network=192.168.0.146, 199.94.60.4   | N/A (booted from volume) |  cpu-su.4     |
+| dd0d8053-ab88-4d4f-b5bc-97e7e2fe035a |    gpu-test      | ACTIVE | default_network=192.168.0.146, 199.94.60.4   | N/A (booted from volume) |  gpu-su-a100.1  |
++--------------------------------------+------------------+--------+----------------------------------------------+--------------------------+--------------+
+
+
+

How to fit the CLI output to your terminal?

+

You can use --fit-width at the end of the command to fit the output to your +terminal.

+
+

If you don't have any instances, you will get the error list index out of +range, which is why we didn't suggest this command for your first test:

+
openstack server list
+list index out of range
+
+

If you see this error:

+
openstack server list
+The request you have made requires authentication. (HTTP 401) (Request-ID: req-6a827bf3-d5e8-47f2-984c-b6edeeb2f7fb)
+
+

Then your environment variables are likely not configured correctly.

+

The most common reason is that you made a typo when entering your password. +Try sourcing the OpenStack RC file again and retyping it.

+

You can type openstack -h to see a list of available commands.

+
+

Note

+

This includes some admin-only commands.

+
+

If you try one of these by mistake, you might see this output:

+
openstack user list
+You are not authorized to perform the requested action: identity:list_users.
+(HTTP 403) (Request-ID: req-cafe1e5c-8a71-44ab-bd21-0e0f25414062)
+
+

Depending on your needs for API interaction, this might be sufficient.

+

If you just occasionally want to run 1 or 2 of these commands from your +terminal, you can do it manually or write a quick bash script that makes use of +this CLI.

+

However, this isn't a very optimized way to do complex interactions with +OpenStack. For that, you want to write scripts that interact with the python +SDK bindings directly.

+
+

Pro Tip

+

If you find yourself fiddling extensively with awk and grep to extract things +like project IDs from the CLI output, it's time to move on to using the client +libraries or the RESTful API directly in your scripts.

+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/persistent-storage/attach-the-volume-to-an-instance/index.html b/openstack/persistent-storage/attach-the-volume-to-an-instance/index.html new file mode 100644 index 00000000..31799fb3 --- /dev/null +++ b/openstack/persistent-storage/attach-the-volume-to-an-instance/index.html @@ -0,0 +1,4632 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Attach The Volume To An Instance

+

Using Horizon dashboard

+

Once you're logged in to NERC's Horizon dashboard:

+

Navigate to Project -> Volumes -> Volumes.

+

In the Actions column, click the dropdown and select "Manage Attachments".

+

Volume Dropdown Options

+

From the menu, choose the instance you want to connect the volume to from +Attach to Instance, and click "Attach Volume".

+

Attach Volume

+

The volume now has a status of "In-use", and the "Attached To" column shows which instance it is attached to and what device name it has.

+

This will be something like /dev/vdb but it can vary depending on the state +of your instance, and whether you have attached volumes before.

+

Make note of the device name of your volume.

+

Attaching Volume Successful

+

Using the CLI

+

Prerequisites:

+

To run the OpenStack CLI commands, you need to have:

+ +

To attach the volume to an instance using the CLI, do this:

+

Using the openstack client

+

When the status is 'available', the volume can be attached to a virtual machine +using the following openstack client command syntax:

+
openstack server add volume <INSTANCE_NAME_OR_ID> <VOLUME_NAME_OR_ID>
+
+

For example:

+
openstack server add volume test-vm my-volume
++-----------------------+--------------------------------------+
+| Field                 | Value                                |
++-----------------------+--------------------------------------+
+| ID                    | 5b5380bd-a15b-408b-8352-9d4219cf30f3 |
+| Server ID             | 8a876a17-3407-484c-85c4-8a46fbac1607 |
+| Volume ID             | 5b5380bd-a15b-408b-8352-9d4219cf30f3 |
+| Device                | /dev/vdb                             |
+| Tag                   | None                                 |
+| Delete On Termination | False                                |
++-----------------------+--------------------------------------+
+
+

where "test-vm" is the virtual machine and the second parameter, "my-volume" is +the volume created before.

+
+

Pro Tip

+

If your instance name <INSTANCE_NAME_OR_ID> and volume name <VOLUME_NAME_OR_ID> +include spaces, you need to enclose them in quotes, i.e. "<INSTANCE_NAME_OR_ID>" +and "<VOLUME_NAME_OR_ID>".

+

For example: openstack server add volume "My Test Instance" "My Volume".

+
+

To verify the volume is attached to the VM

+
openstack volume list
++--------------------------------------+-----------------+--------+------+----------------------------------+
+| ID                                   | Name            | Status | Size | Attached to                      |
++--------------------------------------+-----------------+--------+------+----------------------------------+
+| 563048c5-d27b-4397-bb4e-034e0f4d9fa7 |                 | in-use |   20 | Attached to test-vm on /dev/vda  |
+| 5b5380bd-a15b-408b-8352-9d4219cf30f3 | my-volume       | in-use |   20 | Attached to test-vm on /dev/vdb  |
++--------------------------------------+-----------------+--------+------+----------------------------------+
+
+

The volume now has a status of "in-use", and the "Attached To" column shows which instance it is attached to and what device name it has.

+

This will be something like /dev/vdb but it can vary depending on the state +of your instance, and whether you have attached volumes before.
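If you would like to double-check the device from inside the instance as well, you can list its block devices (an optional check, assuming a Linux guest):

# run inside the VM; a newly attached volume typically shows up as vdb
lsblk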

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/persistent-storage/create-an-empty-volume/index.html b/openstack/persistent-storage/create-an-empty-volume/index.html new file mode 100644 index 00000000..73410577 --- /dev/null +++ b/openstack/persistent-storage/create-an-empty-volume/index.html @@ -0,0 +1,4640 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Create An Empty Volume

+

An empty volume is like an unformatted USB stick. We'll attach it to an +instance, create a filesystem on it, and mount it to the instance.

+

Using Horizon dashboard

+

Once you're logged in to NERC's Horizon dashboard, you can create a volume via +the "Volumes -> Volumes" page by clicking on the "Create Volume" button.

+

Navigate to Project -> Volumes -> Volumes.

+

Volumes

+

Click "Create Volume".

+

In the Create Volume dialog box, give your volume a name. The description +field is optional.

+

Create Volume

+

Choose "empty volume" from the Source dropdown. This will create a volume that +is like an unformatted hard disk. Choose a size (In GiB) for your volume. +Leave Type and Availibility Zone as it as. Only admin to the NERC OpenStack +will be able to manage volume types.

+

Click "Create Volume" button.

+

Checking the status of the created volume will show:

+

"downloading" means that the volume contents is being transferred from the image +service to the volume service

+

In a few moments, the newly created volume will appear in the Volumes list with the Status "available". "available" means the volume can now be used for booting. A set of volume_image metadata is also copied from the image service.

+

Volumes List

+

Using the CLI

+

Prerequisites:

+

To run the OpenStack CLI commands, you need to have:

+ +

To create a volume using the CLI, do this:

+

Using the openstack client

+

This allows an arbitrarily sized disk to be attached to your virtual machine, like plugging in a USB stick. The steps below create a disk of 20 gibibytes (GiB) with the name "my-volume".

+
openstack volume create --size 20 my-volume
+
++---------------------+--------------------------------------+
+| Field               | Value                                |
++---------------------+--------------------------------------+
+| attachments         | []                                   |
+| availability_zone   | nova                                 |
+| bootable            | false                                |
+| consistencygroup_id | None                                 |
+| created_at          | 2024-02-03T17:06:05.000000           |
+| description         | None                                 |
+| encrypted           | False                                |
+| id                  | 5b5380bd-a15b-408b-8352-9d4219cf30f3 |
+| multiattach         | False                                |
+| name                | my-volume                            |
+| properties          |                                      |
+| replication_status  | None                                 |
+| size                | 20                                   |
+| snapshot_id         | None                                 |
+| source_volid        | None                                 |
+| status              | creating                             |
+| type                | tripleo                              |
+| updated_at          | None                                 |
+| user_id             | 938eb8bfc72e4ca3ad2b94e2eb4059f7     |
++---------------------+--------------------------------------+
+
+

To view the newly created volume

+
openstack volume list
++--------------------------------------+-----------------+-----------+------+----------------------------------+
+| ID                                   | Name            | Status    | Size | Attached to                      |
++--------------------------------------+-----------------+-----------+------+----------------------------------+
+| 563048c5-d27b-4397-bb4e-034e0f4d9fa7 |                 | in-use    |   20 | Attached to test-vm on /dev/vda  |
+| 5b5380bd-a15b-408b-8352-9d4219cf30f3 | my-volume       | available |   20 |                                  |
++--------------------------------------+-----------------+-----------+------+----------------------------------+
+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/persistent-storage/delete-volumes/index.html b/openstack/persistent-storage/delete-volumes/index.html new file mode 100644 index 00000000..0784fa32 --- /dev/null +++ b/openstack/persistent-storage/delete-volumes/index.html @@ -0,0 +1,4593 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Delete Volumes

+

Using Horizon dashboard

+

Once you're logged in to NERC's Horizon dashboard.

+

Navigate to Project -> Volumes -> Volumes.

+

Select the volume or volumes that you want to delete.

+

Click "Delete Volumes" button.

+

In the Confirm Delete Volumes window, click the Delete Volumes button to +confirm the action.

+
+

Unable to Delete Volume

+

You cannot delete a bootable volume that is actively in use by a running VM. If you really want to delete such a volume, first delete the instance; you can then delete the detached volume. Before deleting, please make sure the instance was launched with the default selection of No for the "Delete Volume on Instance Delete" configuration option. If you had set this option to "Yes", then deleting the instance will automatically remove the associated volume.

Launch Instance With Persistent Volume

+
+

Using the CLI

+

Prerequisites:

+

To run the OpenStack CLI commands, you need to have:

+ +

To delete a volume using the CLI, do this:

+

Using the openstack client

+

The following openstack client command syntax can be used to delete a volume:

+
openstack volume delete <VOLUME_NAME_OR_ID>
+
+

For example:

+
openstack volume delete my-volume
+
+
+

Pro Tip

+

If your volume name <VOLUME_NAME_OR_ID> includes spaces, you need to enclose it in quotes, i.e. "<VOLUME_NAME_OR_ID>".

+

For example: openstack volume delete "My Volume".

+
+

Your volume will now go into state 'deleting' and completely disappear from the +openstack volume list output.
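To confirm the deletion, you can re-run the listing command used earlier; once the deletion completes, the volume should no longer appear in the output:

openstack volume list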

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/persistent-storage/detach-a-volume/index.html b/openstack/persistent-storage/detach-a-volume/index.html new file mode 100644 index 00000000..8d8cd7fe --- /dev/null +++ b/openstack/persistent-storage/detach-a-volume/index.html @@ -0,0 +1,4665 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Detach A Volume and Attach it to an instance

+

Detach A Volume

+

Using Horizon dashboard

+

Once you're logged in to NERC's Horizon dashboard.

+

Navigate to Project -> Volumes -> Volumes.

+

To detach a mounted volume, go back to "Manage Attachments" and choose Detach Volume.

+

This will popup the following interface to proceed:

+

Detach a volume

+
+

Unable to Detach Volume

+

A bootable volume that is attached to a VM cannot be detached, because it is the root device volume. This bootable volume is created when you launch an instance from an Image or an Instance Snapshot, with persistent storage enabled by selecting the Yes option for "Create New Volume". If you explicitly chose "No" for this option, then no attached volume is created for the instance; ephemeral disk storage is used instead.

+

Launch Instance Set Create New Volume

+
+

Using the CLI

+

Prerequisites:

+

To run the OpenStack CLI commands, you need to have:

+ +

Using the openstack client

+

The following openstack client command syntax can be used to detach a volume +from a VM:

+
openstack server remove volume <INSTANCE_NAME_OR_ID> <VOLUME_NAME_OR_ID>
+
+

For example:

+
openstack server remove volume test-vm my-volume
+
+

where "test-vm" is the virtual machine and the second parameter, "my-volume" is +the volume created before and attached to the VM and can be shown in +openstack volume list.

+
+

Pro Tip

+

If your instance name <INSTANCE_NAME_OR_ID> and volume name <VOLUME_NAME_OR_ID> +include spaces, you need to enclose them in quotes, i.e. "<INSTANCE_NAME_OR_ID>" +and "<VOLUME_NAME_OR_ID>".

+

For example: openstack server remove volume "My Test Instance" "My Volume".

+
+

Check that the volume is in state 'available' again.

+

If that's the case, the volume is now ready to either be attached to another +virtual machine or, if it is not needed any longer, to be completely deleted +(please note that this step cannot be reverted!).
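One way to confirm this, using the same openstack client as above (with "my-volume" being the example volume name used earlier):

openstack volume show my-volume

Look at the status field in the output; it should read "available".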

+

Attach the detached volume to an instance

+

Once it is successfully detached, you can use "Manage Attachments" to attach it +to another instance if desired as explained here.

+

OR,

+

You can attach the existing volume (Detached!) to the new instance as shown below:

+

Attaching Volume to an Instance

+

After this, run the following commands as the root user to mount it:

+
mkdir /mnt/test_volume
+mount /dev/vdb /mnt/test_volume
+
+

All the data from the previous instance will be available under the mounted folder at /mnt/test_volume.

+
+

Very Important Note

+

Also, a given volume might not get the same device name the second time you +attach it to an instance.

+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/persistent-storage/extending-volume/index.html b/openstack/persistent-storage/extending-volume/index.html new file mode 100644 index 00000000..1db5667c --- /dev/null +++ b/openstack/persistent-storage/extending-volume/index.html @@ -0,0 +1,4606 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Extending Volume

+

A volume can be made larger while maintaining the existing contents, assuming the file system supports resizing. We can extend a volume that is not attached to any VM and is in the "Available" status.

+

The steps are as follows:

+
    +
  • +

    Extend the volume to its new size

    +
  • +
  • +

    Extend the filesystem to its new size

    +
  • +
+

Using Horizon dashboard

+

Once you're logged in to NERC's Horizon dashboard.

+

Navigate to Project -> Volumes -> Volumes.

+

Extending Volume

+

Specify the new extended size in GiB:

+

Volume New Extended Size

+

Using the CLI

+

Prerequisites:

+

To run the OpenStack CLI commands, you need to have:

+ +

Using the openstack client

+

The following openstack client command syntax can be used to extend an existing volume from its previous size to a new size:

+
openstack volume set --size <NEW_SIZE_IN_GiB> <VOLUME_NAME_OR_ID>
+
+

For example:

+
openstack volume set --size 100 my-volume
+
+

where "my-volume" is the existing volume with a size of 80 GiB and is going +to be extended to a new size of 100 GiB."

+
+

Pro Tip

+

If your volume name <VOLUME_NAME_OR_ID> includes spaces, you need to enclose +them in quotes, i.e. "<VOLUME_NAME_OR_ID>".

+

For example: openstack volume set --size 100 "My Volume".

+
+

For Windows systems, please follow the provider documentation.

+
+

Please note

+
    +
  • +

    Volumes can be made larger, but not smaller. There is no support for +shrinking existing volumes.

    +
  • +
  • +

    The procedure given above has been tested with ext4 and XFS filesystems only.

    +
  • +
+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/persistent-storage/format-and-mount-the-volume/index.html b/openstack/persistent-storage/format-and-mount-the-volume/index.html new file mode 100644 index 00000000..9ced3894 --- /dev/null +++ b/openstack/persistent-storage/format-and-mount-the-volume/index.html @@ -0,0 +1,4638 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Format And Mount The Volume

+

Prerequisites:

+

Before formatting and mounting the volume, you need to have already created a +new volume as referred here and attached it to any +running VM, as described here.

+

For Linux based virtual machine

+

To verify that the newly created volume, "my-volume", exists and is attached to +a VM, "test-vm", run this openstack client command:

+
openstack volume list
++--------------------------------------+-----------------+--------+------+----------------------------------+
+| ID                                   | Name            | Status | Size | Attached to                      |
++--------------------------------------+-----------------+--------+------+----------------------------------+
+| 563048c5-d27b-4397-bb4e-034e0f4d9fa7 |                 | in-use |   20 | Attached to test-vm on /dev/vda  |
+| 5b5380bd-a15b-408b-8352-9d4219cf30f3 | my-volume       | in-use |   20 | Attached to test-vm on /dev/vdb  |
++--------------------------------------+-----------------+--------+------+----------------------------------+
+
+

The volume has a status of "in-use", and the "Attached To" column shows which instance it is attached to and what device name it has.

+

This will be something like /dev/vdb but it can vary depending on the state +of your instance, and whether you have attached volumes before.

+

Make note of the device name of your volume.

+

SSH into your instance. You should now see the volume as an additional disk in +the output of sudo fdisk -l or lsblk or cat /proc/partitions.

+
# lsblk
+NAME    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
+...
+vda     254:0    0   10G  0 disk
+├─vda1  254:1    0  9.9G  0 part /
+├─vda14 254:14   0    4M  0 part
+└─vda15 254:15   0  106M  0 part /boot/efi
+vdb     254:16   0    1G  0 disk
+
+

Here, we see the volume as the disk vdb, which matches the /dev/vdb we previously noted in the "Attached To" column.

+

Create a filesystem on the volume and mount it. In this example, we will create +an ext4 filesystem:

+

Run the following commands as root user:

+
mkfs.ext4 /dev/vdb
+mkdir /mnt/test_volume
+mount /dev/vdb /mnt/test_volume
+df -H
+
+

The volume is now available at the mount point:

+
lsblk
+NAME    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT
+...
+vda     254:0    0   10G  0 disk
+├─vda1  254:1    0  9.9G  0 part /
+├─vda14 254:14   0    4M  0 part
+└─vda15 254:15   0  106M  0 part /boot/efi
+vdb     254:16   0    1G  0 disk /mnt/test_volume
+
+

If you place data in the directory /mnt/test_volume, detach the volume, and +mount it to another instance, the second instance will have access to the data.
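If you want the volume to be mounted again automatically after a reboot, one common approach (shown here only as a sketch, not an NERC requirement) is to add an /etc/fstab entry keyed on the filesystem UUID rather than the device name, since the device name can change between attachments:

# find the UUID of the filesystem on the attached volume
sudo blkid /dev/vdb

# then append a line like this to /etc/fstab (replace the UUID placeholder)
# UUID=<UUID_FROM_BLKID>  /mnt/test_volume  ext4  defaults,nofail  0  2

The nofail option prevents the boot process from hanging if the volume happens to be detached at the time.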

+
+

Important Note

+

In this case it's easy to spot because there is only one additional disk attached +to the instance, but it's important to keep track of the device name, especially +if you have multiple volumes attached.

+
+

For Windows virtual machine

+

Here, we create an empty volume following the steps outlined in this documentation.

+

Please make sure you are creating a volume of size 100 GiB:

+

Create Volume for Windows VM

+

Then attach the newly created volume to a running Windows VM:

+

Attach Volume to a running Windows VM

+

Log in via remote desktop using the Floating IP attached to the Windows VM:

+

Connect to Remote Instance using Floating IP

+

Prompted Administrator Login

+
+

What is the user login for Windows Server 2022?

+

The default username is "Administrator," and the password is the one you set +using the user data PowerShell script during the launch as +described here.

+
+

Successfully Remote Connected Instance

+

Once connected, search for "Disk Management" in the Windows search box. This will show all attached disks as Unknown and Offline, as shown here:

+

Windows Disk Management

+

In Disk Management, select and hold (or right-click) the disk you want to +initialize, and then select "Initialize Disk". If the disk is listed as Offline, +first select and hold (or right-click) the disk, and then select "Online".

+

Windows Set Disk Online

+

Windows Initialize Disk

+

In the Initialize Disk dialog box, make sure the correct disk is selected, and +then choose OK to accept the default partition style. If you need to change the +partition style (GPT or MBR), see Compare partition styles - GPT and MBR.

+

Windows Disk Partition Style

+

Format the New Volume:

+
    +
  • +

    Select and hold (or right-click) the unallocated space of the new disk.

    +
  • +
  • +

    Select "New Simple Volume" and follow the wizard to create a new partition.

    +
  • +
+

Windows Simple Volume Wizard Start

+
    +
  • +

    Choose the file system (usually NTFS for Windows).

    +
  • +
  • +

    Assign a drive letter or mount point.

    +
  • +
+

Complete Formatting:

+
    +
  • +

    Complete the wizard to format the new volume.

    +
  • +
  • +

    Once formatting is complete, the new volume should be visible in File Explorer + as shown below:

    +
  • +
+

Windows Simple Volume Wizard Start

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/persistent-storage/images/attach-volume-to-an-instance.png b/openstack/persistent-storage/images/attach-volume-to-an-instance.png new file mode 100644 index 00000000..b388141d Binary files /dev/null and b/openstack/persistent-storage/images/attach-volume-to-an-instance.png differ diff --git a/openstack/persistent-storage/images/attach-volume-to-an-win-instance.png b/openstack/persistent-storage/images/attach-volume-to-an-win-instance.png new file mode 100644 index 00000000..e7e9854a Binary files /dev/null and b/openstack/persistent-storage/images/attach-volume-to-an-win-instance.png differ diff --git a/openstack/persistent-storage/images/choose_S3_protocol.png b/openstack/persistent-storage/images/choose_S3_protocol.png new file mode 100644 index 00000000..f65d1a88 Binary files /dev/null and b/openstack/persistent-storage/images/choose_S3_protocol.png differ diff --git a/openstack/persistent-storage/images/config_winscp.png b/openstack/persistent-storage/images/config_winscp.png new file mode 100644 index 00000000..9d5283cc Binary files /dev/null and b/openstack/persistent-storage/images/config_winscp.png differ diff --git a/openstack/persistent-storage/images/container-file-upload-success.png b/openstack/persistent-storage/images/container-file-upload-success.png new file mode 100644 index 00000000..81e5e4fd Binary files /dev/null and b/openstack/persistent-storage/images/container-file-upload-success.png differ diff --git a/openstack/persistent-storage/images/container-public-access-setting.png b/openstack/persistent-storage/images/container-public-access-setting.png new file mode 100644 index 00000000..5bbe4a25 Binary files /dev/null and b/openstack/persistent-storage/images/container-public-access-setting.png differ diff --git a/openstack/persistent-storage/images/container-upload-popup.png b/openstack/persistent-storage/images/container-upload-popup.png new file mode 100644 index 00000000..cffefdad Binary files /dev/null and b/openstack/persistent-storage/images/container-upload-popup.png differ diff --git a/openstack/persistent-storage/images/create-container.png b/openstack/persistent-storage/images/create-container.png new file mode 100644 index 00000000..39200c15 Binary files /dev/null and b/openstack/persistent-storage/images/create-container.png differ diff --git a/openstack/persistent-storage/images/create-transfer-a-volume.png b/openstack/persistent-storage/images/create-transfer-a-volume.png new file mode 100644 index 00000000..8daae749 Binary files /dev/null and b/openstack/persistent-storage/images/create-transfer-a-volume.png differ diff --git a/openstack/persistent-storage/images/create_volume.png b/openstack/persistent-storage/images/create_volume.png new file mode 100644 index 00000000..c8d1945c Binary files /dev/null and b/openstack/persistent-storage/images/create_volume.png differ diff --git a/openstack/persistent-storage/images/create_volume_win.png b/openstack/persistent-storage/images/create_volume_win.png new file mode 100644 index 00000000..9151d6b9 Binary files /dev/null and b/openstack/persistent-storage/images/create_volume_win.png differ diff --git a/openstack/persistent-storage/images/cyberduck-open-connection.png b/openstack/persistent-storage/images/cyberduck-open-connection.png new file mode 100644 index 00000000..f750eb88 Binary files /dev/null and b/openstack/persistent-storage/images/cyberduck-open-connection.png differ diff --git 
a/openstack/persistent-storage/images/cyberduck-s3-configuration.png b/openstack/persistent-storage/images/cyberduck-s3-configuration.png new file mode 100644 index 00000000..eadfb833 Binary files /dev/null and b/openstack/persistent-storage/images/cyberduck-s3-configuration.png differ diff --git a/openstack/persistent-storage/images/cyberduck-select-Amazon-s3.png b/openstack/persistent-storage/images/cyberduck-select-Amazon-s3.png new file mode 100644 index 00000000..65f696ea Binary files /dev/null and b/openstack/persistent-storage/images/cyberduck-select-Amazon-s3.png differ diff --git a/openstack/persistent-storage/images/cyberduck-successful-connection.png b/openstack/persistent-storage/images/cyberduck-successful-connection.png new file mode 100644 index 00000000..33af80cb Binary files /dev/null and b/openstack/persistent-storage/images/cyberduck-successful-connection.png differ diff --git a/openstack/persistent-storage/images/detach-volume-from-an-instance.png b/openstack/persistent-storage/images/detach-volume-from-an-instance.png new file mode 100644 index 00000000..792d219b Binary files /dev/null and b/openstack/persistent-storage/images/detach-volume-from-an-instance.png differ diff --git a/openstack/persistent-storage/images/disable_public_access_container.png b/openstack/persistent-storage/images/disable_public_access_container.png new file mode 100644 index 00000000..15106354 Binary files /dev/null and b/openstack/persistent-storage/images/disable_public_access_container.png differ diff --git a/openstack/persistent-storage/images/download-file-from-container.png b/openstack/persistent-storage/images/download-file-from-container.png new file mode 100644 index 00000000..3c61c096 Binary files /dev/null and b/openstack/persistent-storage/images/download-file-from-container.png differ diff --git a/openstack/persistent-storage/images/ec2_credentials.png b/openstack/persistent-storage/images/ec2_credentials.png new file mode 100644 index 00000000..5c7508e5 Binary files /dev/null and b/openstack/persistent-storage/images/ec2_credentials.png differ diff --git a/openstack/persistent-storage/images/extending_volumes.png b/openstack/persistent-storage/images/extending_volumes.png new file mode 100644 index 00000000..f5791175 Binary files /dev/null and b/openstack/persistent-storage/images/extending_volumes.png differ diff --git a/openstack/persistent-storage/images/folder-upload-container.png b/openstack/persistent-storage/images/folder-upload-container.png new file mode 100644 index 00000000..4da21223 Binary files /dev/null and b/openstack/persistent-storage/images/folder-upload-container.png differ diff --git a/openstack/persistent-storage/images/fuse-config.png b/openstack/persistent-storage/images/fuse-config.png new file mode 100644 index 00000000..f7dda16e Binary files /dev/null and b/openstack/persistent-storage/images/fuse-config.png differ diff --git a/openstack/persistent-storage/images/instance-create-new-volume.png b/openstack/persistent-storage/images/instance-create-new-volume.png new file mode 100644 index 00000000..51775f93 Binary files /dev/null and b/openstack/persistent-storage/images/instance-create-new-volume.png differ diff --git a/openstack/persistent-storage/images/instance-delete-volume-delete.png b/openstack/persistent-storage/images/instance-delete-volume-delete.png new file mode 100644 index 00000000..610e14aa Binary files /dev/null and b/openstack/persistent-storage/images/instance-delete-volume-delete.png differ diff --git 
a/openstack/persistent-storage/images/instance-persistent-storage-option.png b/openstack/persistent-storage/images/instance-persistent-storage-option.png new file mode 100644 index 00000000..7fd0c0b9 Binary files /dev/null and b/openstack/persistent-storage/images/instance-persistent-storage-option.png differ diff --git a/openstack/persistent-storage/images/object-store.png b/openstack/persistent-storage/images/object-store.png new file mode 100644 index 00000000..5b76ca81 Binary files /dev/null and b/openstack/persistent-storage/images/object-store.png differ diff --git a/openstack/persistent-storage/images/prompted_administrator_login.png b/openstack/persistent-storage/images/prompted_administrator_login.png new file mode 100644 index 00000000..dc7da852 Binary files /dev/null and b/openstack/persistent-storage/images/prompted_administrator_login.png differ diff --git a/openstack/persistent-storage/images/redis-server-config.png b/openstack/persistent-storage/images/redis-server-config.png new file mode 100644 index 00000000..f39da058 Binary files /dev/null and b/openstack/persistent-storage/images/redis-server-config.png differ diff --git a/openstack/persistent-storage/images/remote_connected_instance.png b/openstack/persistent-storage/images/remote_connected_instance.png new file mode 100644 index 00000000..e79ca6d9 Binary files /dev/null and b/openstack/persistent-storage/images/remote_connected_instance.png differ diff --git a/openstack/persistent-storage/images/remote_connection_floating_ip.png b/openstack/persistent-storage/images/remote_connection_floating_ip.png new file mode 100644 index 00000000..2e11ce9b Binary files /dev/null and b/openstack/persistent-storage/images/remote_connection_floating_ip.png differ diff --git a/openstack/persistent-storage/images/s3fs_assets_download.png b/openstack/persistent-storage/images/s3fs_assets_download.png new file mode 100644 index 00000000..b3725fa3 Binary files /dev/null and b/openstack/persistent-storage/images/s3fs_assets_download.png differ diff --git a/openstack/persistent-storage/images/successful_accepted_volume_transfer.png b/openstack/persistent-storage/images/successful_accepted_volume_transfer.png new file mode 100644 index 00000000..40cb3f37 Binary files /dev/null and b/openstack/persistent-storage/images/successful_accepted_volume_transfer.png differ diff --git a/openstack/persistent-storage/images/transfer-volume-initiated.png b/openstack/persistent-storage/images/transfer-volume-initiated.png new file mode 100644 index 00000000..dc9490f2 Binary files /dev/null and b/openstack/persistent-storage/images/transfer-volume-initiated.png differ diff --git a/openstack/persistent-storage/images/transfer-volume-name.png b/openstack/persistent-storage/images/transfer-volume-name.png new file mode 100644 index 00000000..44b9b82b Binary files /dev/null and b/openstack/persistent-storage/images/transfer-volume-name.png differ diff --git a/openstack/persistent-storage/images/upload-file-container.png b/openstack/persistent-storage/images/upload-file-container.png new file mode 100644 index 00000000..c3ff81a9 Binary files /dev/null and b/openstack/persistent-storage/images/upload-file-container.png differ diff --git a/openstack/persistent-storage/images/volume-transfer-accepted.png b/openstack/persistent-storage/images/volume-transfer-accepted.png new file mode 100644 index 00000000..d27b2dba Binary files /dev/null and b/openstack/persistent-storage/images/volume-transfer-accepted.png differ diff --git 
a/openstack/persistent-storage/images/volume-transfer-key.png b/openstack/persistent-storage/images/volume-transfer-key.png new file mode 100644 index 00000000..e50b2309 Binary files /dev/null and b/openstack/persistent-storage/images/volume-transfer-key.png differ diff --git a/openstack/persistent-storage/images/volume_attach.png b/openstack/persistent-storage/images/volume_attach.png new file mode 100644 index 00000000..acbf59d7 Binary files /dev/null and b/openstack/persistent-storage/images/volume_attach.png differ diff --git a/openstack/persistent-storage/images/volume_in_use.png b/openstack/persistent-storage/images/volume_in_use.png new file mode 100644 index 00000000..6b0ca1e6 Binary files /dev/null and b/openstack/persistent-storage/images/volume_in_use.png differ diff --git a/openstack/persistent-storage/images/volume_new_extended_size.png b/openstack/persistent-storage/images/volume_new_extended_size.png new file mode 100644 index 00000000..38232388 Binary files /dev/null and b/openstack/persistent-storage/images/volume_new_extended_size.png differ diff --git a/openstack/persistent-storage/images/volume_options.png b/openstack/persistent-storage/images/volume_options.png new file mode 100644 index 00000000..bb6f8ea3 Binary files /dev/null and b/openstack/persistent-storage/images/volume_options.png differ diff --git a/openstack/persistent-storage/images/volumes-in-a-new-project.png b/openstack/persistent-storage/images/volumes-in-a-new-project.png new file mode 100644 index 00000000..1546c5e4 Binary files /dev/null and b/openstack/persistent-storage/images/volumes-in-a-new-project.png differ diff --git a/openstack/persistent-storage/images/volumes.png b/openstack/persistent-storage/images/volumes.png new file mode 100644 index 00000000..4277834d Binary files /dev/null and b/openstack/persistent-storage/images/volumes.png differ diff --git a/openstack/persistent-storage/images/volumes_list.png b/openstack/persistent-storage/images/volumes_list.png new file mode 100644 index 00000000..53ad5ce9 Binary files /dev/null and b/openstack/persistent-storage/images/volumes_list.png differ diff --git a/openstack/persistent-storage/images/win_disk_management.png b/openstack/persistent-storage/images/win_disk_management.png new file mode 100644 index 00000000..970120ee Binary files /dev/null and b/openstack/persistent-storage/images/win_disk_management.png differ diff --git a/openstack/persistent-storage/images/win_disk_partition_style.png b/openstack/persistent-storage/images/win_disk_partition_style.png new file mode 100644 index 00000000..a91c445f Binary files /dev/null and b/openstack/persistent-storage/images/win_disk_partition_style.png differ diff --git a/openstack/persistent-storage/images/win_disk_simple_volume.png b/openstack/persistent-storage/images/win_disk_simple_volume.png new file mode 100644 index 00000000..40cda0f4 Binary files /dev/null and b/openstack/persistent-storage/images/win_disk_simple_volume.png differ diff --git a/openstack/persistent-storage/images/win_initialize_disk.png b/openstack/persistent-storage/images/win_initialize_disk.png new file mode 100644 index 00000000..34ef3d12 Binary files /dev/null and b/openstack/persistent-storage/images/win_initialize_disk.png differ diff --git a/openstack/persistent-storage/images/win_new_drive.png b/openstack/persistent-storage/images/win_new_drive.png new file mode 100644 index 00000000..5cbb31d5 Binary files /dev/null and b/openstack/persistent-storage/images/win_new_drive.png differ diff --git 
a/openstack/persistent-storage/images/win_set_disk_online.png b/openstack/persistent-storage/images/win_set_disk_online.png new file mode 100644 index 00000000..31d04bef Binary files /dev/null and b/openstack/persistent-storage/images/win_set_disk_online.png differ diff --git a/openstack/persistent-storage/images/winscp-login.png b/openstack/persistent-storage/images/winscp-login.png new file mode 100644 index 00000000..de5768a7 Binary files /dev/null and b/openstack/persistent-storage/images/winscp-login.png differ diff --git a/openstack/persistent-storage/images/winscp-new-session.png b/openstack/persistent-storage/images/winscp-new-session.png new file mode 100644 index 00000000..b8d42581 Binary files /dev/null and b/openstack/persistent-storage/images/winscp-new-session.png differ diff --git a/openstack/persistent-storage/images/winscp-perserve-timestamp-disable.png b/openstack/persistent-storage/images/winscp-perserve-timestamp-disable.png new file mode 100644 index 00000000..db6ad8c2 Binary files /dev/null and b/openstack/persistent-storage/images/winscp-perserve-timestamp-disable.png differ diff --git a/openstack/persistent-storage/images/winscp-successfully-connected.png b/openstack/persistent-storage/images/winscp-successfully-connected.png new file mode 100644 index 00000000..2dcad363 Binary files /dev/null and b/openstack/persistent-storage/images/winscp-successfully-connected.png differ diff --git a/openstack/persistent-storage/mount-the-object-storage/index.html b/openstack/persistent-storage/mount-the-object-storage/index.html new file mode 100644 index 00000000..6d4cd9c9 --- /dev/null +++ b/openstack/persistent-storage/mount-the-object-storage/index.html @@ -0,0 +1,6863 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Mount The Object Storage To An Instance

+

Pre-requisite

+

We are using the following setup to mount the object storage to a NERC OpenStack VM:

+
    +
  • +

1 Linux machine, ubuntu-22.04-x86_64 or your choice of Ubuntu OS image, cpu-su.2 flavor with 2 vCPU, 8GB RAM, and 20GB storage; also assign a Floating IP to this VM.

    +
  • +
  • +

    Setup and enable your S3 API credentials:

    +
  • +
+

To access the API credentials, you must log in through the OpenStack Dashboard and navigate to "Projects > API Access", where you can use the "Download OpenStack RC File" option as well as download the "EC2 Credentials".

+

EC2 Credentials

+

Clicking on "EC2 Credentials" will download a zip file that includes an ec2rc.sh file with content similar to what is shown below. The important parts are EC2_ACCESS_KEY and EC2_SECRET_KEY; keep them noted.

+
  #!/bin/bash
+
+  NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) || NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}")
+  NOVA_KEY_DIR=${NOVARC%/*}
+  export EC2_ACCESS_KEY=...
+  export EC2_SECRET_KEY=...
+  export EC2_URL=https://localhost/notimplemented
+  export EC2_USER_ID=42 # nova does not use user id, but bundling requires it
+  export EC2_PRIVATE_KEY=${NOVA_KEY_DIR}/pk.pem
+  export EC2_CERT=${NOVA_KEY_DIR}/cert.pem
+  export NOVA_CERT=${NOVA_KEY_DIR}/cacert.pem
+  export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set
+
+  alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}"
+  alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"
+
+

Alternatively, you can obtain your EC2 access keys using the openstack client:

+
  sudo apt install python3-openstackclient
+
+  openstack ec2 credentials list
+  +------------------+------------------+--------------+-----------+
+  | Access           | Secret           | Project ID   | User ID   |
+  +------------------+------------------+--------------+-----------+
+  | <EC2_ACCESS_KEY> | <EC2_SECRET_KEY> | <Project_ID> | <User_ID> |
+  +------------------+------------------+--------------+-----------+
+
+

OR, you can even create a new one by running:

+
  openstack ec2 credentials create
+
+
    +
  • +

Source the downloaded OpenStack RC File from Projects > API Access by using the source *-openrc.sh command. Sourcing the RC File will set the required environment variables.

    +
  • +
  • +

Enable the "Allow Other User" option by editing the fuse config file /etc/fuse.conf and uncommenting the "user_allow_other" option.

    +
    sudo nano /etc/fuse.conf
    +
    +
  • +
+

The output is going to look like this:

+

Fuse Config to Allow Other User

+
+

A comparative analysis of Mountpoint for S3, Goofys, and S3FS.

+

When choosing between S3 clients that enable the utilization of an object store +with applications expecting files, it's essential to consider the specific use +case and whether the convenience and compatibility provided by FUSE clients +match the project's requirements.

+

To delve into a comparative analysis of Mountpoint for S3, Goofys, and +S3FS, please read this blog post.

+
+

1. Using Mountpoint for Amazon S3

+

Mountpoint for Amazon S3 is a high-throughput +open-source file client designed to mount an Amazon S3 bucket as a local file system. +Mountpoint is optimized for workloads that need high-throughput read and write +access to data stored in S3 Object Storage through a file system interface.

+
+

Very Important Note

+

Mountpoint for Amazon S3 intentionally does not implement the full POSIX +standard specification for file systems. Mountpoint supports file-based workloads +that perform sequential and random reads, sequential (append only) writes, +and that don’t need full POSIX semantics.

+
+

Install Mountpoint

+

Access your virtual machine using SSH. Update the packages on your system and +install wget to be able to download the mount-s3 binary directly to your VM:

+
sudo apt update && sudo apt upgrade
+sudo apt install wget
+
+

Now, navigate to your home directory:

+
cd
+
+
    +
  1. +

    Download the Mountpoint for Amazon S3 package using wget command:

    +
    wget https://s3.amazonaws.com/mountpoint-s3-release/latest/x86_64/mount-s3.deb
    +
    +
  2. +
  3. +

    Install the package by entering the following command:

    +
    sudo apt-get install ./mount-s3.deb
    +
    +
  4. +
  5. +

    Verify that Mountpoint for Amazon S3 is successfully installed by entering the + following command:

    +
    mount-s3 --version
    +
    +

    You should see output similar to the following:

    +
    mount-s3 1.6.0
    +
    +
  6. +
+

Configuring and using Mountpoint

+

Make a folder to store your credentials:

+
mkdir ~/.aws/
+
+

Create the file ~/.aws/credentials using your favorite text editor (for example nano or vim). Add the following contents to it, using the EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from the ec2rc.sh file (during the "Setup and enable your S3 API credentials" step):

+
[nerc]
+aws_access_key_id=<EC2_ACCESS_KEY>
+aws_secret_access_key=<EC2_SECRET_KEY>
+
+

Save the file and exit the text editor.
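Optionally, restrict the permissions of this credentials file so that only your user can read it, mirroring the "owner-only" permissions used for the s3fs password file later on this page:

chmod 600 ~/.aws/credentials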

+

Create a local directory as a mount point

+
mkdir -p ~/bucket1
+
+

Mount the Container locally using Mountpoint

+

The object storage container i.e. "bucket1" will be mounted in the directory ~/bucket1

+
mount-s3 --profile "nerc" --endpoint-url "https://stack.nerc.mghpcc.org:13808" --allow-other --force-path-style --debug bucket1 ~/bucket1/
+
+

In this command,

+
    +
  • +

mount-s3 is the Mountpoint for Amazon S3 binary; since it is installed in the /usr/bin/ path, we don't need to specify the full path.

    +
  • +
  • +

    --profile corresponds to the name given on the ~/.aws/credentials file i.e. + [nerc].

    +
  • +
  • +

    --endpoint-url corresponds to the Object Storage endpoint url for NERC Object + Storage. You don't need to modify this url.

    +
  • +
  • +

    --allow-other: Allows other users to access the mounted filesystem. This is + particularly useful when multiple users need to access the mounted S3 bucket. + Only allowed if user_allow_other is set in /etc/fuse.conf.

    +
  • +
  • +

    --force-path-style: Forces the use of path-style URLs when accessing the S3 + bucket. This is necessary when working with certain S3-compatible storage services + that do not support virtual-hosted-style URLs.

    +
  • +
  • +

    --debug: Enables debug mode, providing additional information about the mounting + process.

    +
  • +
  • +

    bucket1 is the name of the container which contains the NERC Object Storage + resources.

    +
  • +
  • +

    ~/bucket1 is the location of the folder in which you want to mount the Object + Storage filesystem.

    +
  • +
+
+

Important Note

+

Mountpoint automatically configures reasonable defaults for file system settings +such as permissions and performance. However, if you require finer control over +how the Mountpoint file system behaves, you can adjust these settings accordingly. +For further details, please refer to this resource.

+
+

In order to test whether the mount was successful, navigate to the directory in +which you mounted the NERC container repository, for example:

+
cd ~/bucket1
+
+

Use the ls command to list its content. You should see the output similar to this:

+
ls
+
+README.md   image.png   test-file
+
+

The NERC Object Storage container repository has now been mounted using Mountpoint.

+
+

Very Important Information

+

Please note that none of these mount points are persistent if your VM is stopped or rebooted in the future. After each reboot, you will need to execute the mounting command as mentioned above again.

+
+

Automatically mounting an S3 bucket at boot

+

Mountpoint does not currently support automatically mounting a bucket at system +boot time by configuring them in the /etc/fstab. If you would like your bucket/s +to automatically mount when the machine is started you will need to either set up +a Cron Job in crontab +or using a service manager +like systemd.

+

Using a Cron Job

+

You need to create a Cron job so that the script runs each time your VM reboots, +remounting S3 Object Storage to your VM.

+
crontab -e
+
+

Add this command to the end of the file

+
@reboot sh /<Path_To_Directory>/script.sh
+
+

For example,

+
@reboot sh /home/ubuntu/script.sh
+
+

Create a script.sh file and paste the code below into it.

+
#!/bin/bash
+mount-s3 [OPTIONS] <BUCKET_NAME> <DIRECTORY>
+
+

For example,

+
#!/bin/bash
+mount-s3 --profile "nerc" --endpoint-url "https://stack.nerc.mghpcc.org:13808" --allow-other --force-path-style --debug bucket1 ~/bucket1/
+
+

Make the file executable by running the command below

+
chmod +x script.sh
+
+

Reboot your VM:

+
sudo reboot
+
+

Using a service manager like systemd by creating systemd unit file

+

Create a directory in the /root folder in which you will store the credentials:

+
sudo mkdir /root/.aws
+
+

Copy the credentials you created in your local directory to the .aws directory +in the /root folder:

+
sudo cp ~/.aws/credentials /root/.aws/
+
+
Create systemd unit file i.e. mountpoint-s3.service
+

Create a systemd service unit file that will run the mount command and dynamically mount or unmount the container:

+
sudo nano /etc/systemd/system/mountpoint-s3.service
+
+

Edit the file to look like the below:

+
[Unit]
+Description=Mountpoint for Amazon S3 mount
+Documentation=https://docs.aws.amazon.com/AmazonS3/latest/userguide/mountpoint.html
+#Wants=network.target
+Wants=network-online.target
+#Requires=network-online.target
+AssertPathIsDirectory=/home/ubuntu/bucket1
+After=network-online.target
+
+[Service]
+Type=forking
+User=root
+Group=root
+ExecStart=/usr/bin/mount-s3 bucket1 /home/ubuntu/bucket1 \
+      --profile "nerc" \
+      --endpoint-url "https://stack.nerc.mghpcc.org:13808" \
+      --allow-other \
+      --force-path-style \
+      --debug
+
+ExecStop=/bin/fusermount -u /home/ubuntu/bucket1
+Restart=always
+RestartSec=10
+
+[Install]
+#WantedBy=remote-fs.target
+WantedBy=default.target
+
+
+

Important Note

+

The network-online.target lines ensure that mounting is not attempted until there's a network connection available. The service is launched as soon as the network is up and running; it mounts the bucket and remains active.

+
+
Launch the service
+

Now reload the systemd daemon:

+
sudo systemctl daemon-reload
+
+

Start your service

+
sudo systemctl start mountpoint-s3.service
+
+

To check the status of your service

+
sudo systemctl status mountpoint-s3.service
+
+

To enable your service on every reboot

+
sudo systemctl enable --now mountpoint-s3.service
+
+
+

Information

+

The service name is based on the file name i.e. /etc/systemd/system/mountpoint-s3.service +so you can just use mountpoint-s3 instead of mountpoint-s3.service on all +above systemctl commands.

+

To debug you can use:

+

sudo systemctl status mountpoint-s3.service -l --no-pager or, +journalctl -u mountpoint-s3 --no-pager | tail -50

+
+

Verify that the service is running successfully in the background as the root user:

+
ps aux | grep mount-s3
+
+root       13585  0.0  0.0 1060504 11672 ?       Sl   02:00   0:00 /usr/bin/mount-s3 bucket1 /home/ubuntu/bucket1 --profile nerc --endpoint-url https://stack.nerc.mghpcc.org:13808 --read-only --allow-other --force-path-style --debug
+
+
Stopping the service
+

Stopping the service causes the container to unmount from the mount point.

+

To disable your service on every reboot:

+
sudo systemctl disable --now mountpoint-s3.service
+
+

Confirm the Service is not in "Active" Status:

+
sudo systemctl status mountpoint-s3.service
+
+○ mountpoint-s3.service - Mountpoint for Amazon S3 mount
+    Loaded: loaded (/etc/systemd/system/mountpoint-s3.service; disabled; vendor p>
+    Active: inactive (dead)
+
+

Unmount the local mount point:

+

If the local directory "bucket1" is already mounted, unmount it (replace ~/bucket1 with the location in which you have it mounted):

+
fusermount -u ~/bucket1
+
+

Or,

+
sudo umount -l ~/bucket1
+
+

Now reboot your VM:

+
sudo reboot
+
+
+

Further Reading

+

For further details, including instructions for downloading and installing +Mountpoint on various Linux operating systems, please refer to this resource.

+
+

2. Using Goofys

+

Install goofys

+

Access your virtual machine using SSH. Update the packages on your system and +install wget to be able to download the goofys binary directly to your VM:

+
sudo apt update && sudo apt upgrade
+sudo apt install wget
+
+

Now, navigate to your home directory:

+
cd
+
+

Use wget to download the goofys binary:

+
wget https://github.com/kahing/goofys/releases/latest/download/goofys
+
+

Make the goofys binary executable:

+
chmod +x goofys
+
+

Copy the goofys binary to somewhere in your path

+
sudo cp goofys /usr/bin/
+
+
+

To update goofys in the future

+

In order to update to a newer version of the goofys binary, you need to follow these steps:

+
    +
  • +

    make sure that the data in the NERC Object Storage container is not actively +used by any applications on your VM.

    +
  • +
  • +

    remove the goofys binary from ubuntu's home directory as well as from /usr/bin/.

    +
  • +
  • +

    execute the above commands (those starting with wget and chmod) from your +home directory again and copy it to your path i.e. /usr/bin/.

    +
  • +
  • +

    reboot your VM.

    +
  • +
+
+

Provide credentials to configure goofys

+

Make a folder to store your credentials:

+
mkdir ~/.aws/
+
+

Create the file ~/.aws/credentials using your favorite text editor (for example nano or vim). Add the following contents to it, using the EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from the ec2rc.sh file (during the "Setup and enable your S3 API credentials" step):

+
[nerc]
+aws_access_key_id=<EC2_ACCESS_KEY>
+aws_secret_access_key=<EC2_SECRET_KEY>
+
+

Save the file and exit the text editor.

+

Create a local directory as a mount folder

+
mkdir -p ~/bucket1
+
+

Mount the Container locally using goofys

+

The object storage container i.e. "bucket1" will be mounted in the directory ~/bucket1

+
goofys -o allow_other --region RegionOne --profile "nerc" --endpoint "https://stack.nerc.mghpcc.org:13808" bucket1 ~/bucket1
+
+

In this command,

+
    +
  • +

    goofys is the goofys binary as we already copied this in /usr/bin/ path we + don't need to specify the full path.

    +
  • +
  • +

    -o stands for goofys options, and is handled differently.

    +
  • +
  • +

allow_other allows other users to access the mounted filesystem; this option is only allowed if user_allow_other is set in /etc/fuse.conf.

    +
  • +
  • +

    --profile corresponds to the name given on the ~/.aws/credentials file i.e. + [nerc].

    +
  • +
  • +

    --endpoint corresponds to the Object Storage endpoint url for NERC Object Storage. + You don't need to modify this url.

    +
  • +
  • +

    bucket1 is the name of the container which contains the NERC Object Storage + resources.

    +
  • +
  • +

    ~/bucket1 is the location of the folder in which you want to mount the Object + Storage filesystem.

    +
  • +
+

In order to test whether the mount was successful, navigate to the directory in +which you mounted the NERC container repository, for example:

+
cd ~/bucket1
+
+

Use the ls command to list its content. You should see the output similar to this:

+
ls
+
+README.md   image.png   test-file
+
+

The NERC Object Storage container repository has now been mounted using goofys.
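When you no longer need the mount (for example, before updating the goofys binary as described in the note above), you can unmount it the same way as with Mountpoint:

fusermount -u ~/bucket1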

+
+

Very Important Information

+

Please note that none of these mount points are persistent if your VM is stopped or rebooted in the future. After each reboot, you will need to execute the mounting command as mentioned above again.

+
+

Mounting on system startup

+

Mounts can be set to occur automatically during system initialization so that mounted file systems will persist even after the VM reboots.

+

Create a directory in the /root folder in which you will store the credentials:

+
sudo mkdir /root/.aws
+
+

Copy the credentials you created in your local directory to the .aws directory +in the /root folder:

+
sudo cp ~/.aws/credentials /root/.aws/
+
+

Configure mounting of the bucket1 container

+

Open the file /etc/fstab using your favorite command line text editor for editing. +You will need sudo privileges for that. For example, if you want to use nano, execute +this command:

+
sudo nano /etc/fstab
+
+

Proceed with one of the methods below depending on whether you wish to have the +"bucket1" repository automatically mounted at system startup:

+
Method 1: Mount the repository automatically on system startup
+

Add the following line to the /etc/fstab file:

+
/usr/bin/goofys#bucket1 /home/ubuntu/bucket1 fuse _netdev,allow_other,--dir-mode=0777,--file-mode=0666,--region=RegionOne,--profile=nerc,--endpoint=https://stack.nerc.mghpcc.org:13808 0 0
+
+
Method 2: Do NOT mount the repository automatically on system startup
+

Add the following line to the /etc/fstab file:

+
/usr/bin/goofys#bucket1 /home/ubuntu/bucket1 fuse noauto,_netdev,allow_other,--dir-mode=0777,--file-mode=0666,--region=RegionOne,--profile=nerc,--endpoint=https://stack.nerc.mghpcc.org:13808 0 0
+
+

The difference between this code and the code mentioned in Method 1 is the addition +of the option noauto.
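With the noauto option the entry is not mounted at boot; instead, you can mount and unmount it on demand by referring to the mount point from the /etc/fstab line (a small usage sketch):

sudo mount /home/ubuntu/bucket1    # mount using the /etc/fstab entry
sudo umount /home/ubuntu/bucket1   # unmount when you are done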

+
+

Content of /etc/fstab

+

In the /etc/fstab content as added above:

+
grep goofys /etc/fstab
+
+/usr/bin/goofys#bucket1 /home/ubuntu/bucket1 fuse _netdev,allow_other,--dir-mode=0777,--file-mode=0666,--region=RegionOne,--profile=nerc,--endpoint=https://stack.nerc.mghpcc.org:13808 0 0
+
+
    +
  • +

/usr/bin/goofys is the location of your goofys binary.

    +
  • +
  • +

    /home/ubuntu/bucket1 is the location in which you wish to mount bucket1 +container from your NERC Object Storage.

    +
  • +
  • +

    --profile=nerc is the name you mentioned on the ~/.aws/credentials file +i.e. [nerc].

    +
  • +
+
+

Once you have added that line to your /etc/fstab file, reboot the VM. After the +system has restarted, check whether the NERC Object Storage repository i.e. bucket1 +is mounted in the directory specified by you i.e. in /home/ubuntu/bucket1.

+
+

Important Information

+

If you just want to test the mounting command written in /etc/fstab without rebooting the VM, you can do so by running sudo mount -a. If you want to stop the automatic mounting of the container from the NERC Object Storage repository i.e. bucket1, remove the line you added in the /etc/fstab file, or comment it out by adding a # character in front of that line, and then reboot the VM. Optionally, you can also remove the goofys binary and the credentials file located at ~/.aws/credentials if you no longer want to use goofys.

+
+

3. Using S3FS

+

Install S3FS

+

Access your virtual machine using SSH. Update the packages on your system and install +s3fs:

+
sudo apt update && sudo apt upgrade
+sudo apt install s3fs
+
+
+

For RedHat/Rocky/AlmaLinux

+

The RedHat/Rocky/AlmaLinux repositories do not have s3fs. Therefore, you will need to compile it yourself.

+

First, using your local computer, visit the following website (it contains +the releases of s3fs): https://github.com/s3fs-fuse/s3fs-fuse/releases/latest.

+

Then, in the section with the most recent release, find the Assets part. From there, find the link to the zip version of the Source code.

+

S3FS  Latest Assets Download

+

Right-click on one of the Source Code links, i.e. "v1.94.zip", and select "Copy link address". You will need this link later as a parameter for the wget command to download it to your virtual machine.

+

Access your VM on the NERC OpenStack using the web console or SSH.

+

Update your packages:

+
sudo dnf update -y
+
+

Install the prerequisites including fuse, the C++ compiler and make:

+
sudo dnf config-manager --set-enabled crb
+
+sudo dnf install automake fuse fuse-devel gcc-c++ git libcurl-devel libxml2-devel make openssl-devel wget unzip
+
+# OR, sudo dnf --enablerepo=crb install automake fuse fuse-devel gcc-c++ git libcurl-devel libxml2-devel make openssl-devel wget unzip
+
+

Now, use wget to download the source code. Replace https://github.com/s3fs-fuse/s3fs-fuse/archive/refs/tags/v1.94.zip with the link to the source code you found previously:

+
wget https://github.com/s3fs-fuse/s3fs-fuse/archive/refs/tags/v1.94.zip
+
+

Use the ls command to verify that the zip archive has been downloaded:

+
ls
+
+

Unzip the archive (replace v1.94.zip with the name of the archive you downloaded):

+
unzip v1.94.zip
+
+

Use the ls command to find the name of the folder you just extracted:

+
ls
+
+

Now, navigate to that folder (replace s3fs-fuse-1.94 with the name of the folder you just extracted):

+
cd s3fs-fuse-1.94
+
+

Perform the compilation by executing the following commands in order:

+
./autogen.sh
+./configure
+make
+sudo make install
+
+

s3fs should now be installed in /usr/local/bin/s3fs.

+
+

Create a file which will store the S3 Credentials

+

Store your S3 credentials in a file ${HOME}/.passwd-s3fs and set "owner-only" permissions. Run the following command, with the EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from the ec2rc.sh file (above), to store them in the file.

+
echo EC2_ACCESS_KEY:EC2_SECRET_KEY > ${HOME}/.passwd-s3fs
+
+

Change the permissions of this file to 600 to set "owner-only" permissions:

+
chmod 600 ${HOME}/.passwd-s3fs
+
+

Create a Container in the NERC Project's Object storage

+

We create it using the OpenStack Swift client:

+
sudo apt install python3-swiftclient
+
+

Let's call the Container "bucket1"

+
swift post bucket1
+
+
+

More about Swift Interface

+

You can read more about using Swift Interface for NERC Object Storage here.

+
+

Create a local directory as a mount point in your VM

+
mkdir -p ~/bucket1
+
+

Mount the Container locally using s3fs

+

The object storage container i.e. "bucket1" will be mounted in the directory ~/bucket1

+
s3fs bucket1 ~/bucket1 -o passwd_file=~/.passwd-s3fs -o url=https://stack.nerc.mghpcc.org:13808 -o use_path_request_style -o umask=0002
+
+

Unmount the local mount point

+

If you have the local mounted directory "bucket1" already mounted, unmount it +(replace ~/bucket1 with the location in which you have it mounted):

+
sudo umount -l ~/bucket1
+
+

Configure mounting of the bucket1 repository

+

Open the file /etc/fstab using your favorite command line text editor for editing. +You will need sudo privileges for that. For example, if you want to use nano, execute +this command:

+
sudo nano /etc/fstab
+
+

Proceed with one of the methods below depending on whether you wish to have the +"bucket1" repository automatically mounted at system startup:

+

Method 1: Mount the repository automatically on startup

+

Add the following line to the /etc/fstab file:

+
/usr/bin/s3fs#bucket1 /home/ubuntu/bucket1 fuse passwd_file=/home/ubuntu/.passwd-s3fs,_netdev,allow_other,use_path_request_style,uid=0,umask=0222,mp_umask=0222,gid=0,url=https://stack.nerc.mghpcc.org:13808 0 0
+
+

Method 2: Do NOT mount the repository automatically on startup

+

Add the following line to the /etc/fstab file:

+
/usr/bin/s3fs#bucket1 /home/ubuntu/bucket1 fuse noauto,passwd_file=/home/ubuntu/.passwd-s3fs,_netdev,allow_other,use_path_request_style,uid=0,umask=0222,mp_umask=0222,gid=0,url=https://stack.nerc.mghpcc.org:13808 0 0
+
+

The difference between this code and the code mentioned in Method 1 is the addition +of the option noauto.
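With noauto in place the container is not mounted at boot, but the /etc/fstab entry still lets you mount it on demand by referring only to the mount point, for example:

# mount on demand using the /etc/fstab entry
sudo mount /home/ubuntu/bucket1
# unmount when you are done
sudo umount /home/ubuntu/bucket1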

+
+

Content of /etc/fstab

+

In the /etc/fstab content as added above:

+
    +
  • +

    /usr/bin/s3fs is the location of your s3fs binary. If you installed +it using apt on Debian or Ubuntu, you do not have to change anything here. +If you are using a self-compiled version of s3fs created on RedHat/Rocky/AlmaLinux +as explained above, that location is /usr/local/bin/s3fs.

    +
  • +
  • +

    /home/ubuntu/.passwd-s3fs is the location of the file which contains the key pair used for mounting the "bucket1" repository, as we named it in the previous step.

    +
  • +
+
+

4. Using Rclone

+

Installing Rclone

+

Install rclone as described here, or, for our Ubuntu-based VM, just SSH into the VM and run the following command as the default ubuntu user:

+
curl -sSL https://rclone.org/install.sh | sudo bash
+
+

Configuring Rclone

+

If you run rclone config file, you will see the default location of the config file for your system.

+
rclone config file
+Configuration file doesn't exist, but rclone will use this path:
+/home/ubuntu/.config/rclone/rclone.conf
+
+

So create the config file at the path mentioned above, i.e. /home/ubuntu/.config/rclone/rclone.conf, and add the following entry with the name [nerc]:

+
[nerc]
+type = s3
+env_auth = false
+provider = Other
+endpoint = https://stack.nerc.mghpcc.org:13808
+acl = public-read
+access_key_id = <YOUR_EC2_ACCESS_KEY_FROM_ec2rc_FILE>
+secret_access_key = <YOUR_EC2_SECRET_KEY_FROM_ec2rc_FILE>
+location_constraint =
+server_side_encryption =
+
+

More about the config for AWS S3 compatible API can be seen here.

+
+

Important Information

+

Note that if you set env_auth = true, rclone will take the credentials from environment variables, so you should not put the access and secret keys in the config file in this case.

+
+
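As a sketch of that alternative, rclone can read the standard AWS-style environment variables when env_auth = true (treat the variable values below as placeholders):

# rclone.conf contains env_auth = true and no keys
export AWS_ACCESS_KEY_ID=<YOUR_EC2_ACCESS_KEY_FROM_ec2rc_FILE>
export AWS_SECRET_ACCESS_KEY=<YOUR_EC2_SECRET_KEY_FROM_ec2rc_FILE>
rclone lsd "nerc:"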

Listing the Containers and Contents of a Container

+

Once your Object Storage has been configured in Rclone, you can then use the +Rclone interface to List all the Containers with the "lsd" command

+
rclone lsd "nerc:"
+
+

Or,

+
rclone lsd "nerc:" --config=rclone.conf
+
+

For example,

+
rclone lsd "nerc:" --config=rclone.conf
+      -1 2024-04-23 20:21:43        -1 bucket1
+
+

To list the files and folders available within a container, i.e. "bucket1" in this case, we can use the "ls" command:

+
rclone ls "nerc:bucket1/"
+  653 README.md
+    0 image.png
+   12 test-file
+
+

Create a mount point directory

+
mkdir -p bucket1
+
+

Mount the container with Rclone

+

Start the mount like this, where home/ubuntu/bucket1 is an empty existing directory:

+
rclone -vv --vfs-cache-mode full mount nerc:bucket1 /home/ubuntu/bucket1 --allow-other --allow-non-empty
+
+

On Linux, you can run the mount in either foreground or background (aka daemon) mode. Mount runs in foreground mode by default. Use the --daemon flag to force background mode, i.e.:

+
rclone mount remote:path/to/files /path/to/local/mount --daemon
+
+

When running in background mode the user will have to stop the mount manually:

+
fusermount -u /path/to/local/mount
+
+

Or,

+
sudo umount -l /path/to/local/mount
+
+

Now we have the mount running with background mode enabled. Let's say we want the mount to persist after a server/machine reboot. There are a few ways to do it:

+

Create systemd unit file i.e. rclone-mount.service

+

Create a systemd service unit file that runs the above rclone mount command and dynamically mounts or unmounts the container:

+
sudo nano /etc/systemd/system/rclone-mount.service
+
+

Edit the file to look like the below:

+
[Unit]
+Description=rclone mount
+Documentation=http://rclone.org/docs/
+AssertPathIsDirectory=/home/ubuntu/bucket1
+After=network-online.target
+
+[Service]
+Type=simple
+User=root
+Group=root
+ExecStart=/usr/bin/rclone mount \
+      --config=/home/ubuntu/.config/rclone/rclone.conf \
+      --vfs-cache-mode full \
+      nerc:bucket1 /home/ubuntu/bucket1 \
+              --allow-other \
+              --allow-non-empty
+
+ExecStop=/bin/fusermount -u /home/ubuntu/bucket1
+Restart=always
+RestartSec=10
+
+[Install]
+WantedBy=default.target
+
+

The service is launched as soon as the network is up and running; it mounts the bucket and remains active. Stopping the service unmounts the container from the mount point.

+

Launch the service using a service manager

+

Now reload the systemd daemon:

+
sudo systemctl daemon-reload
+
+

Start your service

+
sudo systemctl start rclone-mount.service
+
+

To check the status of your service

+
sudo systemctl status rclone-mount.service
+
+

To enable your service on every reboot

+
sudo systemctl enable --now rclone-mount.service
+
+
+

Information

+

The service name is based on the file name i.e. /etc/systemd/system/rclone-mount.service +so you can just use rclone-mount instead of rclone-mount.service on all +above systemctl commands.

+

To debug you can use:

+

sudo systemctl status rclone-mount.service -l --no-pager or, +journalctl -u rclone-mount --no-pager | tail -50

+
+

Verify, if the container is mounted successfully:

+
df -hT | grep rclone
+nerc:bucket1   fuse.rclone  1.0P     0  1.0P   0% /home/ubuntu/bucket1
+
+

5. Using JuiceFS

+

Preparation

+

A JuiceFS file system consists of two parts:

+
    +
  • +

    Object Storage: Used for data storage.

    +
  • +
  • +

    Metadata Engine: A database used for storing metadata. In this case, we will + use a durable Redis in-memory database service that + provides extremely fast performance.

    +
  • +
+

Installation of the JuiceFS client

+

Access your virtual machine using SSH. Update the packages on your system and install +the JuiceFS client:

+
sudo apt update && sudo apt upgrade
+# default installation path is /usr/local/bin
+curl -sSL https://d.juicefs.com/install | sh -
+
+

Verify whether any JuiceFS client process is running in the background:

+
ps aux | grep juicefs
+ubuntu     16275  0.0  0.0   7008  2212 pts/0    S+   18:44   0:00 grep --color=auto juicefs
+
+

Installing and Configuring Redis database

+

Install Redis by running:

+
sudo apt install redis-server
+
+

This will download and install Redis and its dependencies. Following this, there +is one important configuration change to make in the Redis configuration file, +which was generated automatically during the installation.

+

You can find the line number of the supervised directive by running:

+
sudo cat /etc/redis/redis.conf -n | grep supervised
+
+228  #   supervised no      - no supervision interaction
+229  #   supervised upstart - signal upstart by putting Redis into SIGSTOP mode
+231  #   supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
+232  #   supervised auto    - detect upstart or systemd method based on
+236  supervised no
+
+

Open this file with your preferred text editor:

+
sudo nano /etc/redis/redis.conf -l
+
+

Inside the config file, find the supervised directive. This directive allows you +to declare an init system to manage Redis as a service, providing you with more +control over its operation. The supervised directive is set to no by default. +Since you are running Ubuntu, which uses the systemd +init system, change this to systemd as shown here:
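After the edit, the directive should read as follows (the surrounding line number may differ on your system):

supervised systemd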

+

Redis Server Config

+
    +
  • Binding to localhost:
  • +
+

By default, Redis is only accessible from localhost. We need to verify that by locating the bind line, which you can do by running:

+
  sudo cat /etc/redis/redis.conf -n | grep bind
+
+  ...
+  68  bind 127.0.0.1 ::1
+  ...
+
+

and make sure it is uncommented (remove the # if it exists) by editing this +file with your preferred text editor.

+

Save and close the file when you are finished. If you used nano to edit the file, do so by pressing CTRL + X, Y, then ENTER.

+

Then, restart the Redis service to reflect the changes you made to the configuration +file:

+
  sudo systemctl restart redis.service
+
+

With that, you've installed and configured Redis and it's running on your machine. +Before you begin using it, you should first check whether Redis is functioning +correctly.

+

Start by checking that the Redis service is running:

+
  sudo systemctl status redis
+
+

If it is running without any errors, this command will show a status of "active (running)".

+

To test that Redis is functioning correctly, connect to the server using redis-cli, +Redis's command-line client:

+
  redis-cli
+
+

In the prompt that follows, test connectivity with the ping command:

+
  ping
+
+

Output:

+
  PONG
+
+

Also, check that binding to localhost is working fine by running the following +netstat command:

+
  sudo netstat -lnp | grep redis
+
+  tcp        0      0 127.0.0.1:6379          0.0.0.0:*               LISTEN      16967/redis-server
+  tcp6       0      0 ::1:6379                :::*                    LISTEN      16967/redis-server
+
+
+

Important Note

+

The netstat command may not be available on your system by default. If + this is the case, you can install it (along with a number of other handy + networking tools) with the following command: sudo apt install net-tools.

+
+
Configuring a Redis Password
+

Configuring a Redis password enables one of its two built-in security features — +the auth command, which requires clients to authenticate to access the database. +The password is configured directly in Redis's configuration file, +/etc/redis/redis.conf.

+

First, we need to locate the line where the requirepass directive is mentioned:

+
sudo cat /etc/redis/redis.conf -n | grep requirepass
+
+...
+790  # requirepass foobared
+...
+
+

Then open the Redis config file, i.e. /etc/redis/redis.conf, again with your preferred editor:

+
sudo nano /etc/redis/redis.conf -l
+
+

Uncomment it by removing the #, and change foobared to a secure password.
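After the edit, the line should look similar to this, with your own secure password in place of the placeholder:

requirepass <your_redis_password>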

+
+

How to generate a random password?

+

You can use openssl to generate a random password by running the following command locally:

+

openssl rand 12 | openssl base64 -A

+

<your_redis_password>

+
+

After saving and closing the file, you need to restart the Redis service to apply the changes you made to the configuration file by running:

+
sudo systemctl restart redis.service
+
+

To test that the password works, open up the Redis client:

+
redis-cli
+
+

The following shows a sequence of commands used to test whether the Redis password +works. The first command tries to set a key to a value before authentication:

+
127.0.0.1:6379> set key1 10
+
+

That won’t work because you didn't authenticate, so Redis returns an error:

+

Output:

+
(error) NOAUTH Authentication required.
+
+

The next command authenticates with the password specified in the Redis configuration +file:

+
127.0.0.1:6379> auth <your_redis_password>
+
+

Redis acknowledges:

+

Output:

+
OK
+
+

After that, running the previous command again will succeed:

+
127.0.0.1:6379> set key1 10
+
+

Output:

+
OK
+
+

get key1 queries Redis for the value of the new key.

+
127.0.0.1:6379> get key1
+
+

Output:

+
"10"
+
+

After confirming that you're able to run commands in the Redis client after +authenticating, you can exit redis-cli:

+
127.0.0.1:6379> quit
+
+

Authorizing S3 access using juicefs config

+

You can store the S3 credentials using juicefs config, which allows you to add the Access Key and Secret Key for the file system, by running:

+
juicefs config \
+--access-key=<EC2_ACCESS_KEY> \
+--secret-key=<EC2_SECRET_KEY> \
+redis://default:<your_redis_password>@127.0.0.1:6379/1
+
+

Formatting file system

+
sudo juicefs format --storage s3 --bucket https://stack.nerc.mghpcc.org:13808/<your_container> redis://default:<your_redis_password>@127.0.0.1:6379/1 myjfs
+
+

Mounting file system manually

+
Create a local directory as a mount point folder
+
mkdir -p ~/bucket1
+
+
Mount the Container locally using juicefs
+

The formatted file system "myjfs" will be mounted in the directory ~/bucket1 by +running the following command:

+
juicefs mount redis://default:<your_redis_password>@127.0.0.1:6379/1 ~/bucket1
+
+

Mount JuiceFS at Boot Time

+

After JuiceFS has been successfully formatted, follow this guide to set up auto-mount +on boot.

+

We can specify the --update-fstab option on the mount command, which will automatically set up mounting at boot:

+
sudo juicefs mount --update-fstab --max-uploads=50 --writeback --cache-size 204800 <META-URL> <MOUNTPOINT>
+
+grep <MOUNTPOINT> /etc/fstab
+<META-URL> <MOUNTPOINT> juicefs _netdev,max-uploads=50,writeback,cache-size=204800 0 0
+
+ls -l /sbin/mount.juicefs
+lrwxrwxrwx 1 root root 22 Apr 24 20:25 /sbin/mount.juicefs -> /usr/local/bin/juicefs
+
+

For example,

+
sudo juicefs mount --update-fstab --max-uploads=50 --writeback --cache-size 204800 redis://default:<your_redis_password>@127.0.0.1:6379/1 ~/bucket1
+
+grep juicefs /etc/fstab
+redis://default:<your_redis_password>@127.0.0.1:6379/1  /home/ubuntu/bucket1  juicefs  _netdev,cache-size=204800,max-uploads=50,writeback  0 0
+
+ls -l /sbin/mount.juicefs
+lrwxrwxrwx 1 root root 22 Apr 24 20:25 /sbin/mount.juicefs -> /usr/local/bin/juicefs
+
+

Automating Mounting with systemd service unit file

+

If you're using JuiceFS and need to apply settings like the database access password, S3 access key, and secret key, which are kept off the command line using environment variables for security reasons, it may not be easy to configure them in the /etc/fstab file. In such cases, you can use systemd to mount your JuiceFS instance.

+

Here's how you can set up your systemd configuration file:

+

Create a systemd service unit file that runs the juicefs mount command and dynamically mounts or unmounts the container:

+
sudo nano /etc/systemd/system/juicefs-mount.service
+
+

Edit the file to look like the below:

+
[Unit]
+Description=JuiceFS mount
+Documentation=https://juicefs.com/docs/
+AssertPathIsDirectory=/home/ubuntu/bucket1
+After=network-online.target
+
+[Service]
+Type=simple
+User=root
+Group=root
+ExecStart=/usr/local/bin/juicefs mount \
+"redis://default:<your_redis_password>@127.0.0.1:6379/1" \
+/home/ubuntu/bucket1 \
+--no-usage-report \
+--writeback \
+--cache-size 102400 \
+--cache-dir /home/juicefs_cache \
+--buffer-size 2048 \
+--open-cache 0 \
+--attr-cache 1 \
+--entry-cache 1 \
+--dir-entry-cache 1 \
+--cache-partial-only false \
+--free-space-ratio 0.1 \
+--max-uploads 20 \
+--max-deletes 10 \
+--backup-meta 0 \
+--log /var/log/juicefs.log \
+--get-timeout 300 \
+--put-timeout 900 \
+--io-retries 90 \
+--prefetch 1
+
+ExecStop=/usr/local/bin/juicefs umount /home/ubuntu/bucket1
+Restart=always
+RestartSec=10
+
+[Install]
+WantedBy=default.target
+
+
+

Important Information

+

Feel free to modify the options and environment according to your needs. Please make sure you change <your_redis_password> to your own Redis password that was set up by following this step.

+
+
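If you prefer not to embed the Redis password in the ExecStart line at all, JuiceFS also supports passing the metadata engine password through the META_PASSWORD environment variable (check that this applies to your client version); in that case the relevant part of the unit file could instead look like this sketch:

[Service]
# the metadata engine password is taken from the environment instead of the URL
Environment="META_PASSWORD=<your_redis_password>"
ExecStart=/usr/local/bin/juicefs mount "redis://default@127.0.0.1:6379/1" /home/ubuntu/bucket1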

The service is launched as soon as the network is up and running; it mounts the bucket and remains active. Stopping the service unmounts the container from the mount point.

+
Launch the service as daemon
+

Now reload the systemd daemon:

+
sudo systemctl daemon-reload
+
+

Start your service

+
sudo systemctl start juicefs-mount.service
+
+

To check the status of your service

+
sudo systemctl status juicefs-mount.service
+
+

To enable your service on every reboot

+
sudo systemctl enable --now juicefs-mount.service
+
+
+

Information

+

The service name is based on the file name i.e. /etc/systemd/system/juicefs-mount.service +so you can just use juicefs-mount instead of juicefs-mount.service on all +above systemctl commands.

+

To debug you can use:

+

sudo systemctl status juicefs-mount.service -l --no-pager or, +journalctl -u juicefs-mount --no-pager | tail -50

+
+

Verify, if the container is mounted successfully:

+
df -hT | grep juicefs
+JuiceFS:myjfs  fuse.juicefs  1.0P  4.0K  1.0P   1% /home/ubuntu/bucket1
+
+

Data Synchronization

+

juicefs sync is a powerful data migration tool that can copy data across all supported storage systems, including object storage, JuiceFS itself, and local file systems; you can freely copy data between any of these systems.

+

Command Syntax

+

This synchronizes data from SRC, i.e. the source data address or path, to DST, i.e. the destination address or path; it works for both directories and files.

+
juicefs sync [command options] SRC DST
+
+
+

More Information

+

[command options] are synchronization options. See command reference +for more details.

+
+

Address format:

+
[NAME://][ACCESS_KEY:SECRET_KEY[:TOKEN]@]BUCKET[.ENDPOINT][/PREFIX]
+
+# MinIO only supports path style
+minio://[ACCESS_KEY:SECRET_KEY[:TOKEN]@]ENDPOINT/BUCKET[/PREFIX]
+
+

Synchronize between Object Storage and JuiceFS

+

The following command synchronizes the movies container on the Object Storage to your local JuiceFS file system, i.e. ~/jfs:

+
# create local folder
+mkdir -p ~/jfs
+# mount JuiceFS
+juicefs mount -d redis://default:<your_redis_password>@127.0.0.1:6379/1 ~/jfs
+# synchronize
+juicefs sync --force-update s3://<EC2_ACCESS_KEY>:<EC2_SECRET_KEY>@movies.stack.nerc.mghpcc.org:13808/ ~/jfs/
+
+

The following command synchronizes the images directory from your local JuiceFS file system, i.e. ~/jfs, to the Object Storage container, i.e. the movies container:

+
# mount JuiceFS
+juicefs mount -d redis://default:<your_redis_password>@127.0.0.1:6379/1 ~/jfs
+# create local folder and add some file to this folder
+mkdir -p ~/jfs/images/
+cp "test.image" ~/jfs/images/
+# synchronization
+juicefs sync --force-update ~/jfs/images/ s3://<EC2_ACCESS_KEY>:<EC2_SECRET_KEY>@movies.stack.nerc.mghpcc.org:13808/images/
+
+

How to destroy a file system

+

After JuiceFS has been successfully formatted, follow this guide to clean up.

+

JuiceFS client provides the destroy command to completely destroy a file system, +which will result in:

+
    +
  • +

    Deletion of all metadata entries of this file system

    +
  • +
  • +

    Deletion of all data blocks of this file system

    +
  • +
+

Use this command in the following format:

+
juicefs destroy <METADATA URL> <UUID>
+
+

Here,

+

<METADATA URL>: The URL address of the metadata engine

+

<UUID>: The UUID of the file system

+

Find the UUID of existing mount file system

+

You can run either juicefs config redis://default:<your_redis_password>@127.0.0.1:6379/1 or juicefs status redis://default:<your_redis_password>@127.0.0.1:6379/1 to get detailed information about the mounted file system, i.e. "myjfs", that was set up by following this step. The output looks like the one shown here:

+
{
+...
+"Name": "myjfs",
+"UUID": "<UUID>",
+...
+}
+
+

Destroy a file system

+

Please note the "UUID", which you will need in order to run the juicefs destroy command as shown below:

+
juicefs destroy redis://default:<your_redis_password>@127.0.0.1:6379/1 <UUID> --force
+
+

When destroying a file system, the client will issue a confirmation prompt. Please +make sure to check the file system information carefully and enter y after confirming +it is correct.

+
+

Danger

+

The destroy operation will cause all the data in the database and the object storage associated with the file system to be deleted. Please make sure to back up any important data before proceeding!

+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/openstack/persistent-storage/object-storage/index.html b/openstack/persistent-storage/object-storage/index.html new file mode 100644 index 00000000..1bcbc879 --- /dev/null +++ b/openstack/persistent-storage/object-storage/index.html @@ -0,0 +1,6698 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Object Storage

+

OpenStack Object Storage (Swift) is a highly available, distributed, eventually consistent +object/blob store. Object Storage is used to manage cost-effective and long-term +preservation and storage of large amounts of data across clusters of standard server +hardware. The common use cases include the storage, backup and archiving of unstructured +data, such as documents, static web content, images, video files, and virtual +machine images, etc.

+

End-users can interact with the object storage system, based on their provisioned quotas, through a RESTful HTTP API, i.e. the Swift API, or use one of the many client libraries that exist for all of the popular programming languages, such as Java, Python, Ruby, and C#. Swift is also compatible with Amazon's Simple Storage Service (S3) API, which makes it easier for end-users to move data between multiple storage endpoints and supports hybrid cloud setups.

+

1. Access by Web Interface i.e. Horizon Dashboard

+

To get started, navigate to Project -> Object Store -> Containers.

+

Object Store

+

Create a Container

+

In order to store objects, you need at least one Container to put them in. +Containers are essentially top-level directories. Other services use the +terminology buckets.

+

Click Create Container. Give your container a name.

+

Create a Container

+
+

Important Note

+

The container name needs to be unique, not just within your project but +across all of our OpenStack installation. If you get an error message +after trying to create the container, try giving it a more unique name.

+
+

For now, leave the "Container Access" set to Private.

+

Upload a File

+

Click on the name of your container, and click the Upload File icon as shown below:

+

Container Upload File

+

Click Browse and select a file from your local machine to upload.

+

It can take a while to upload very large files, so if you're just testing it out +you may want to use a small text file or similar.

+

Container Upload Popup

+

By default the File Name will be the same as the original file, but you can change +it to another name. Click "Upload File". Your file will appear inside the container +as shown below once successful:

+

Successful File Upload

+

Using Folders

+

Object storage, by definition, does not organize objects into folders, but you can use folder-like names to keep your data organized.

+

On the backend, the folder name is actually just prefixed to the object name, but +from the web interface (and most other clients) it works just like a folder.
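For example, using the Swift CLI described later on this page, uploading a local file whose name includes a path produces an object whose name carries that prefix (the file name here is just illustrative and must exist locally):

swift upload unique-container-test mydir/notes.txt
swift list unique-container-test
mydir/notes.txt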

+

To add a folder, click on the "+ folder" icon as shown below:

+

Upload Folder on Container

+

Make a container public

+

Making a container public allows you to send your collaborators a URL that gives +access to the container's contents.

+
+

Hosting a static website using public Container

+

You can use public Container to host a static website. On a static website, +individual webpages include static website content (HTML, CSS etc.). They +might also contain client-side scripts (e.g. JavaScript).

+
+

Click on your container's name, then check the "Public Access" checkbox. Note that +"Public Access" changes from "Disabled" to "Link".

+

Setting Container Public Access

+

Click "Link" to see a list of the objects in the container. This is the URL of your container.

+
+

Important Note

+

Anyone who obtains the URL will be able to access the container, so this +is not recommended as a way to share sensitive data with collaborators.

+
+

In addition, everything inside a public container is public, so we recommend creating +a separate container specifically for files that should be made public.

+

To download the file test-file, we would use the container's public URL with the object name appended, as shown in the curl example below.

+
+

Very Important Information

+

Here, 4c5bccef73c144679d44cbc96b42df4e is the specific Tenant ID or Project ID. You can get this value when you click on the public container's Link in a new browser tab.

+
+

Or, you can just click on "Download" next to the file's name as shown below:

+

Download File From Container

+

You can also interact with public objects using a utility such as curl:

+
curl https://stack.nerc.mghpcc.org:13808/v1/AUTH_4c5bccef73c144679d44cbc96b42df4e/unique-container-test
+test-file
+
+

To download a file:

+
curl -o local-file.txt https://stack.nerc.mghpcc.org:13808/v1/AUTH_4c5bccef73c144679d44cbc96b42df4e/unique-container-test/test-file
+
+

Make a container private

+

You can make a public container private by clicking on your container's name, +then uncheck the "Public Access" checkbox. Note that "Public Access" changes +from "Link" to "Disabled".

+

This will deactivate the public URL of the container and then it will show "Disabled".

+

Disable Container Public Access

+

2. Access by using APIs

+

i. OpenStack CLI

+

Prerequisites:

+

To run the OpenStack CLI commands, you need to have:

+ +

Some Object Storage management examples

+
Create a container
+

In order to create a container in the Object Storage service, you can use the +openstack client with the following command.

+
openstack container create mycontainer
++---------------------------------------+-------------+------------------------------------+
+| account                               | container   | x-trans-id                         |
++---------------------------------------+-------------+------------------------------------+
+| AUTH_4c5bccef73c144679d44cbc96b42df4e | mycontainer | txb875f426a011476785171-00624b37e8 |
++---------------------------------------+-------------+------------------------------------+
+
+

Once created you can start adding objects.

+
Manipulate objects in a container
+

To upload files to a container you can use the following command

+
openstack object create --name my_test_file mycontainer test_file.txt
++--------------+-------------+----------------------------------+
+| object       | container   | etag                             |
++--------------+-------------+----------------------------------+
+| my_test_file | mycontainer | e3024896943ee80422d1e5ff44423658 |
++--------------+-------------+----------------------------------+
+
+

Once uploaded you can see the metadata through:

+
openstack object show mycontainer my_test_file
++----------------+---------------------------------------+
+| Field          | Value                                 |
++----------------+---------------------------------------+
+| account        | AUTH_4c5bccef73c144679d44cbc96b42df4e |
+| container      | mycontainer                           |
+| content-length | 26                                    |
+| content-type   | application/octet-stream              |
+| etag           | e3024896943ee80422d1e5ff44423658      |
+| last-modified  | Mon, 04 Apr 2022 18:27:14 GMT         |
+| object         | my_test_file                          |
++----------------+---------------------------------------+
+
+

You can save the contents of the object from your container to your local machine +by using:

+

openstack object save mycontainer my_test_file --file test_file.txt

+
+

Very Important

+

Please note that this will overwrite the file in the local directory.

+
+

Finally you can delete the object with the following command

+

openstack object delete mycontainer my_test_file

+
Delete the container
+

If you want to delete the container, you can use the following command

+

openstack container delete mycontainer

+

If the container has some data, deleting it will fail; you can use the recursive option to delete the objects inside it as well.

+
openstack container delete mycontainer
+Conflict (HTTP 409) (Request-ID: tx6b53c2b3e52d453e973b4-00624b400f)
+
+

So, try to delete the container recursively using the command

+

openstack container delete --recursive mycontainer

+
List existing containers
+

You can check the existing containers with

+
openstack container list
++---------------+
+| Name          |
++---------------+
+| mycontainer   |
++---------------+
+
+
Swift quota utilization
+

To check the overall space used, you can use the following command

+
openstack object store account show
++------------+---------------------------------------+
+| Field      | Value                                 |
++------------+---------------------------------------+
+| Account    | AUTH_4c5bccef73c144679d44cbc96b42df4e |
+| Bytes      | 665                                   |
+| Containers | 1                                     |
+| Objects    | 3                                     |
++------------+---------------------------------------+
+
+

To check the space used by a specific container

+
openstack container show mycontainer
++----------------+---------------------------------------+
+| Field          | Value                                 |
++----------------+---------------------------------------+
+| account        | AUTH_4c5bccef73c144679d44cbc96b42df4e |
+| bytes_used     | 665                                   |
+| container      | mycontainer                           |
+| object_count   | 3                                     |
+| read_acl       | .r:*,.rlistings                       |
+| storage_policy | Policy-0                              |
++----------------+---------------------------------------+
+
+

ii. Swift Interface

+

This is a python client for the Swift API. There's a Python API +(the swiftclient module), and a command-line script (swift).

+
    +
  • This example uses a Python3 virtual environment, but you are free to choose + any other method to create a local virtual environment like Conda.
    python3 -m venv venv
    +
    +
  • +
+
+

Choosing Correct Python Interpreter

+

Make sure you are able to use python or python3 or py -3 (For + Windows Only) to create a directory named venv (or whatever name you + specified) in your current working directory.

+
+
    +
  • Activate the virtual environment by running:
  • +
+

on Linux/Mac: source venv/bin/activate

+

on Windows: venv\Scripts\activate

+

Install Python Swift Client page at PyPi

+
    +
  • Once virtual environment is activated, install python-swiftclient and python-keystoneclient
  • +
+

pip install python-swiftclient python-keystoneclient

+
    +
  • Swift authenticates using a user, tenant, and key, which map to your OpenStack username, project, and password.
  • +
+

For this, you need to download the "NERC's OpenStack RC File" with the +credentials for your NERC project from the NERC's OpenStack dashboard. +Then you need to source that RC file using: source *-openrc.sh. You can +read here +on how to do this.

+

By sourcing the "NERC's OpenStack RC File", you will set the all required +environmental variables.

+
Check your authentication variables
+

Check what the swift client will use as authentication variables:

+
swift auth
+
+
Create your first container
+

Let's create your first container by using the following command:

+
swift post <container_name>
+
+

For example:

+
swift post unique-container-test
+
+
Upload files
+

Upload a file to your container:

+
swift upload <container_name> <file_or_folder>
+
+

To upload a file to the above listed i.e. unique-container-test, you can run +the following command:

+
swift upload unique-container-test ./README.md
+
+
Show containers
+

Then type the following command to get list of your containers:

+
swift list
+
+

This will output the existing containers in your project, e.g. unique-container-test.

+

Show objects inside your container:

+
swift list <container_name>
+
+

For example:

+
swift list unique-container-test
+README.md
+
+
Show statistics of your containers and objects
+

You can see statistics, ranging from specific objects to the entire account. Use the following command to see statistics for a specific container.

+
swift stat <container_name>
+
+

You can also use swift stat <container_name> <filename> to check stats of +individual files.

+

If you want to see stats from your whole account, you can type:

+
swift stat
+
+
Download objects
+

You can download single objects by using the following command:

+
swift download <container_name> <your_object> -o /path/to/local/<your_object>
+
+

For example:

+
swift download unique-container-test README.md -o ./README.md
+README.md [auth 2.763s, headers 2.907s, total 2.907s, 0.000 MB/s]
+
+

It's also possible to simulate downloading an object/container without actually downloading it:

+
swift download <container-name> --no-download
+
+
Download all objects from specific container
+
swift download <container_name> -D </path/to/folder/>
+
+
Download all objects from your account
+
swift download --all -D </path/to/folder/>
+
+
Delete objects
+

Delete specific object by issuing the following command:

+
swift delete <container_name> <object_name>
+
+

For example:

+
swift delete unique-container-test README.md
+README.md
+
+

And finally delete specific container by typing the following:

+
swift delete <container_name>
+
+

For example:

+
swift delete unique-container-test
+
+

Other helpful Swift commands:

+
delete               Delete a container or objects within a container.
+download             Download objects from containers.
+list                 Lists the containers for the account or the objects
+                    for a container.
+post                 Updates meta information for the account, container,
+                    or object; creates containers if not present.
+copy                 Copies object, optionally adds meta
+stat                 Displays information for the account, container,
+                    or object.
+upload               Uploads files or directories to the given container.
+capabilities         List cluster capabilities.
+tempurl              Create a temporary URL.
+auth                 Display auth related environment variables.
+bash_completion      Outputs option and flag cli data ready for
+                    bash_completion.
+
+
+

Helpful Tip

+

Type swift -h to learn more about using the swift commands. The client has a --debug flag, which can be useful if you are facing any issues.

+
+

iii. Using AWS CLI

+

The Ceph Object Gateway supports basic operations through the Amazon S3 interface.

+

You can use both high-level (s3) commands with the AWS CLI +and API-Level (s3api) commands with the AWS CLI +to access object storage on your NERC project.

+

Prerequisites:

+

To run the s3 or s3api commands, you need to have:

+ +
+

Understand these Amazon S3 terms

+

i. Bucket – A top-level Amazon S3 folder.

+

ii. Prefix – An Amazon S3 folder in a bucket.

+

iii. Object – Any item that's hosted in an Amazon S3 bucket.

+
+

Configuring the AWS CLI

+

To access this interface, you must log in through the OpenStack Dashboard and navigate to "Projects > API Access", where you can download the "OpenStack RC File" as well as the "EC2 Credentials".

+

EC2 Credentials

+

Clicking on "EC2 Credentials" will download a zip file including the ec2rc.sh file, which has content similar to that shown below. The important parts are EC2_ACCESS_KEY and EC2_SECRET_KEY; keep them noted.

+
#!/bin/bash
+
+NOVARC=$(readlink -f "${BASH_SOURCE:-${0}}" 2>/dev/null) || NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' "${BASH_SOURCE:-${0}}")
+NOVA_KEY_DIR=${NOVARC%/*}
+export EC2_ACCESS_KEY=...
+export EC2_SECRET_KEY=...
+export EC2_URL=https://localhost/notimplemented
+export EC2_USER_ID=42 # nova does not use user id, but bundling requires it
+export EC2_PRIVATE_KEY=${NOVA_KEY_DIR}/pk.pem
+export EC2_CERT=${NOVA_KEY_DIR}/cert.pem
+export NOVA_CERT=${NOVA_KEY_DIR}/cacert.pem
+export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set
+
+alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}"
+alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"
+
+

Alternatively, you can obtain your EC2 access keys using the openstack client:

+
sudo apt install python3-openstackclient
+
+openstack ec2 credentials list
++------------------+------------------+--------------+-----------+
+| Access           | Secret           | Project ID   | User ID   |
++------------------+------------------+--------------+-----------+
+| <EC2_ACCESS_KEY> | <EC2_SECRET_KEY> | <Project_ID> | <User_ID> |
++------------------+------------------+--------------+-----------+
+
+

OR, you can even create a new one by running:

+
openstack ec2 credentials create
+
+
    +
  • Source the downloaded OpenStack RC File from Projects > API Access by using: + source *-openrc.sh command. Sourcing the RC File will set the required environment + variables.
  • +
+

Then run the aws configure command, which requires the EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from the ec2rc.sh file (during the "Configuring the AWS CLI" step):

+
  $> aws configure --profile "'${OS_PROJECT_NAME}'"
+  AWS Access Key ID [None]: <EC2_ACCESS_KEY>
+  AWS Secret Access Key [None]: <EC2_SECRET_KEY>
+  Default region name [None]:
+  Default output format [None]:
+
+

This will create the AWS CLI configuration file ~/.aws/config in your home directory, with the EC2 profile based on your ${OS_PROJECT_NAME}, and the ~/.aws/credentials file with the Access and Secret keys that you provided above.

+

The EC2 profile is stored here:

+
  cat ~/.aws/config
+
+  [profile ''"'"'${OS_PROJECT_NAME}'"'"'']
+
+

Whereas the credentials are stored here:

+
  cat ~/.aws/credentials
+
+  ['${OS_PROJECT_NAME}']
+  aws_access_key_id = <EC2_ACCESS_KEY>
+  aws_secret_access_key = <EC2_SECRET_KEY>
+
+

Alternatively, you can manually create the AWS CLI configuration file ~/.aws/config in your home directory with the EC2 profile and credentials as shown below:

+
  cat ~/.aws/config
+
+  ['${OS_PROJECT_NAME}']
+  aws_access_key_id = <EC2_ACCESS_KEY>
+  aws_secret_access_key = <EC2_SECRET_KEY>
+
+
+

Information

+

The profile that you use must have permissions that allow the AWS operations to be performed.

+
+

Listing buckets using aws-cli

+

i. Using s3api:

+
aws --profile "'${OS_PROJECT_NAME}'" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \
+    s3api list-buckets
+
+{
+    "Buckets": [
+        {
+            "Name": "unique-container-test",
+            "CreationDate": "2009-02-03T16:45:09+00:00"
+        }
+    ],
+    "Owner": {
+        "DisplayName": "Test Project-f69dcff:mmunakami@fas.harvard.edu",
+        "ID": "Test Project-f69dcff:mmunakami@fas.harvard.edu"
+    }
+}
+
+

ii. Alternatively, you can do the same using s3:

+
aws --profile "'${OS_PROJECT_NAME}'" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \
+    s3 ls
+
+

Output:

+
2009-02-03 11:45:09 unique-container-test
+
+

To list the contents inside a bucket

+
aws --profile "'${OS_PROJECT_NAME}'" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \
+    s3 ls s3://<your-bucket>
+
+

To make a bucket

+
aws --profile "'${OS_PROJECT_NAME}'" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \
+    s3 mb s3://<your-bucket>
+
+

Adding/ Copying files from one container to another container

+
    +
  1. +

    Single file copy using cp command:

    +

    The aws tool provides a cp command to move files to your s3 bucket:

    +
    aws --profile "'${OS_PROJECT_NAME}'" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \
    +    s3 cp <Your-file> s3://<your-bucket>/
    +
    +

    Output:

    +
    upload: .\<Your-file> to s3://<your-bucket>/<Your-file>
    +
    +
  2. +
  3. +

    Whole directory copy using the --recursive flag:

    +
    aws --profile "'${OS_PROJECT_NAME}'" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \
    +    s3 cp <Your-directory> s3://<your-bucket>/ --recursive
    +
    +

    Output:

    +
    upload: <your-directory>/<file0> to s3://<your-bucket>/<file0>
    +upload: <your-directory>/<file1> to s3://<your-bucket>/<file1>
    +...
    +upload: <your-directory>/<fileN> to s3://<your-bucket>/<fileN>
    +
    +
  4. +
+

You can then use aws s3 ls to check that your files have been properly uploaded:

+
aws --profile "'${OS_PROJECT_NAME}'" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \
+    s3 ls s3://<your-bucket>/
+
+

Output:

+
2022-04-04 16:32:38          <size> <file0>
+2022-04-04 16:32:38          <size> <file1>
+...
+2022-04-04 16:25:50          <size> <fileN>
+
+
+

Other Useful Flags

+

Additionally, aws s3 cp provides an --exclude flag to filter out files that should not be transferred; the syntax is: --exclude "<pattern>"

+
+
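For instance, to copy a directory while skipping temporary files (the *.tmp pattern is just an illustration):

aws --profile "'${OS_PROJECT_NAME}'" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \
    s3 cp <Your-directory> s3://<your-bucket>/ --recursive --exclude "*.tmp"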

To delete an object from a bucket

+
aws --profile "'${OS_PROJECT_NAME}'" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \
+    s3 rm s3://<your-bucket>/argparse-1.2.1.tar.gz
+
+

To remove a bucket

+
aws --profile "'${OS_PROJECT_NAME}'" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \
+    s3 rb s3://<your-bucket>
+
+

iv. Using s3cmd

+

S3cmd is a free command-line tool and client for uploading, retrieving and +managing data in Amazon S3 and other cloud storage service providers that use +the S3 protocol.

+

Prerequisites:

+ +

Configuring s3cmd

+

The EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from the ec2rc.sh file can then be plugged into the .s3cfg config file.

+

The .s3cfg file requires the following configuration to work with our Object +storage service:

+
# Setup endpoint
+host_base = stack.nerc.mghpcc.org:13808
+host_bucket = stack.nerc.mghpcc.org:13808
+use_https = True
+
+# Setup access keys
+access_key = <YOUR_EC2_ACCESS_KEY_FROM_ec2rc_FILE>
+secret_key = <YOUR_EC2_SECRET_KEY_FROM_ec2rc_FILE>
+
+# Enable S3 v4 signature APIs
+signature_v2 = False
+
+

We are assuming that the configuration file is placed in its default location, i.e. $HOME/.s3cfg. If that is not the case, you need to add the parameter --config=FILE with the location of your configuration file to override the default config location.
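For example, if your configuration file lives elsewhere (the path below is hypothetical):

s3cmd --config=/path/to/custom.s3cfg ls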

+

Using s3cmd

+
To list buckets
+

Use the following command to list all s3 buckets

+
s3cmd ls
+
+

Or,

+
s3cmd ls s3://
+
+2009-02-03 16:45  s3://nerc-test-container
+2009-02-03 16:45  s3://second-mycontainer
+2009-02-03 16:45  s3://unique-container-test
+
+
Create a new bucket
+

In order to create a bucket, you can use s3cmd with the following command

+
s3cmd mb s3://mybucket
+
+Bucket 's3://mybucket/' created
+
+s3cmd ls
+2009-02-03 16:45  s3://mybucket
+
+2009-02-03 16:45  s3://nerc-test-container
+2009-02-03 16:45  s3://second-mycontainer
+2009-02-03 16:45  s3://unique-container-test
+
+
To copy an object to bucket
+

The command below will upload the file file.txt to the bucket using the s3cmd command.

+
s3cmd put ~/file.txt s3://mybucket/
+
+upload: 'file.txt' -> 's3://mybucket/file.txt'  [1 of 1]
+0 of 0     0% in    0s     0.00 B/s  done
+
+

s3cmd also allows you to set additional properties on the stored objects. In the example below, we set the content type with the --mime-type option and the cache-control parameter to 1 hour with --add-header.

+
s3cmd put --mime-type='application/json' --add-header='Cache-Control: max-age=3600' ~/file.txt s3://mybucket
+
+
Uploading Directory in bucket
+

If we need to upload an entire directory, use -r to upload it recursively as shown below.

+
s3cmd put -r <your-directory> s3://mybucket/
+
+upload: 'backup/hello.txt' -> 's3://mybucket/backup/hello.txt'  [1 of 1]
+0 of 0     0% in    0s     0.00 B/s  done
+
+
List the objects of bucket
+

List the objects in the bucket using the ls switch with s3cmd.

+
s3cmd ls s3://mybucket/
+
+                       DIR   s3://mybucket/backup/
+2022-04-05 03:10         0   s3://mybucket/file.txt
+2022-04-05 03:14         0   s3://mybucket/hello.txt
+
+
To copy/ download an object to local system
+

Use the following command to download files from the bucket:

+
s3cmd get s3://mybucket/file.txt
+
+download: 's3://mybucket/file.txt' -> './file.txt'  [1 of 1]
+0 of 0     0% in    0s     0.00 B/s  done
+
+
To sync local file/directory to a bucket
+
s3cmd sync newdemo s3://mybucket
+
+upload: 'newdemo/newdemo_file.txt' -> 's3://mybucket/newdemo/newdemo_file.txt'  [1 of 1]
+0 of 0     0% in    0s     0.00 B/s  done
+
+

To sync bucket or object with local filesystem

+
s3cmd sync  s3://unique-container-test otherlocalbucket
+
+download: 's3://unique-container-test/README.md' -> 'otherlocalbucket/README.md'  [1 of 3]
+653 of 653   100% in    0s     4.54 kB/s  done
+download: 's3://unique-container-test/image.png' -> 'otherlocalbucket/image.png'  [2 of 3]
+0 of 0     0% in    0s     0.00 B/s  done
+download: 's3://unique-container-test/test-file' -> 'otherlocalbucket/test-file'  [3 of 3]
+12 of 12   100% in    0s    83.83 B/s  done
+Done. Downloaded 665 bytes in 1.0 seconds, 665.00 B/s.
+
+
To delete an object from bucket
+

You can delete files from the bucket with the following s3cmd command

+
s3cmd del s3://unique-container-test/README.md
+
+delete: 's3://unique-container-test/README.md'
+
+
To delete directory from bucket
+
s3cmd del s3://mybucket/newdemo
+
+delete: 's3://mybucket/newdemo'
+
+
To delete a bucket
+
s3cmd rb s3://mybucket
+
+ERROR: S3 error: 409 (BucketNotEmpty): The bucket you tried to delete is not empty
+
+
+

Important Information

+

The above command failed because the bucket was not empty! You can remove all objects inside the bucket and then use the command again. Or, you can run the command with the -r or --recursive flag, i.e. s3cmd rb s3://mybucket -r or s3cmd rb s3://mybucket --recursive.

+
+

v. Using Rclone

+

rclone is a convenient and performant command-line tool for transferring files +and synchronizing directories directly between your local file systems and the +NERC's containers.

+

Prerequisites:

+

To run the rclone commands, you need to have:

+ +

Configuring Rclone

+

First, you'll need to configure rclone. As object storage systems have quite complicated authentication, the credentials are kept in a config file.

+

If you run rclone config file you will see where the default location is +for you.

+
+

Note

+

For Windows users, you may need to specify the full path to the Rclone executable file if it's not included in your system's PATH variable.

+
+

The EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from the ec2rc.sh file can then be plugged into the rclone config file.

+

Edit the config file's content on the path location described by +rclone config file command and add the following entry with the name [nerc]:

+
[nerc]
+type = s3
+env_auth = false
+provider = Other
+endpoint = https://stack.nerc.mghpcc.org:13808
+acl = public-read
+access_key_id = <YOUR_EC2_ACCESS_KEY_FROM_ec2rc_FILE>
+secret_access_key = <YOUR_EC2_SECRET_KEY_FROM_ec2rc_FILE>
+location_constraint =
+server_side_encryption =
+
+

More about the config for AWS S3 compatible API can be seen here.

+
+

Important Information

+

Note that if you set env_auth = true, rclone will take the credentials from environment variables, so you should not put the access and secret keys in the config file in this case.

+
+

OR, you can copy this content locally to a new config file and then use this flag to override the config location, e.g. rclone --config=FILE.

+
+

Interactive Configuration

+

Run rclone config to setup. See rclone config docs +for more details.

+
+

Using Rclone

+

rclone supports many subcommands (see +the complete list of Rclone subcommands). +A few commonly used subcommands (assuming you configured the NERC Object Storage +as nerc):

+
Listing the Containers and Contents of a Container
+

Once your Object Storage has been configured in Rclone, you can then use the +Rclone interface to List all the Containers with the "lsd" command

+
rclone lsd "nerc:"
+
+

Or,

+
rclone lsd "nerc:" --config=rclone.conf
+
+

For example,

+
rclone lsd "nerc:" --config=rclone.conf
+        -1 2009-02-03 11:45:09        -1 second-mycontainer
+        -1 2009-02-03 11:45:09        -1 unique-container-test
+
+

To list the files and folders available within a container, i.e. "unique-container-test" in this case, we can use the "ls" command:

+
rclone ls "nerc:unique-container-test/"
+  653 README.md
+    0 image.png
+   12 test-file
+
+
Uploading and Downloading Files and Folders
+

rclone supports a variety of options to allow you to Copy, Sync and Move files from one destination to another.

+

A simple example of this can be seen below, where we copy (Upload) the file +"upload.me" to the <your-bucket> container:

+
rclone copy "./upload.me" "nerc:<your-bucket>/"
+
+

Another example, to copy (Download) the file "upload.me" from the +<your-bucket> container to your local:

+
rclone -P copy "nerc:<your-bucket>/upload.me" "./"
+
+

Also, to Sync files into the <your-bucket> container, try with --dry-run first:

+
rclone --dry-run sync /path/to/files nerc:<your-bucket>
+
+

Then sync for real

+
rclone sync /path/to/files nerc:<your-bucket>
+
+
Mounting object storage on local filesystem
+

Linux:

+

First, you need to create a directory on which you will mount your filesystem:

+

mkdir ~/mnt-rclone

+

Then you can simply mount your object storage with:

+

rclone -vv --vfs-cache-mode writes mount nerc: ~/mnt-rclone

+
+

More about using Rclone

+

You can read more about Rclone Mounting here.

+
+

Windows:

+

First you have to download Winfsp:

+

WinFsp is an open source Windows File System Proxy which provides a FUSE +emulation layer.

+

Then you can simply mount your object storage with (no need to create the directory +in advance):

+

rclone -vv --vfs-cache-mode writes mount nerc: C:/mnt-rclone

+

The vfs-cache-mode flag enables file caching; you can use either the writes or full option. For further explanation, see the official documentation.

+

Now that your object storage is mounted, you can list, create and delete files +in it.

+
Unmount object storage
+

To unmount, simply press CTRL-C and the mount will be interrupted.

+

vi. Using client (Python) libraries

+

a. The EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from the ec2rc.sh file can then be plugged into your application. See the example below using the Python Boto3 library, which connects to the S3 API interface using EC2 credentials and performs some basic operations on the available buckets and files that the user has access to.

+
import boto3
+
+# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#bucket
+s3 = boto3.resource('s3',
+    aws_access_key_id='YOUR_EC2_ACCESS_KEY_FROM_ec2rc_FILE',
+    aws_secret_access_key='YOUR_EC2_SECRET_KEY_FROM_ec2rc_FILE', #pragma: allowlist secret
+    endpoint_url='https://stack.nerc.mghpcc.org:13808',
+)
+
+# List all containers
+for bucket in s3.buckets.all():
+    print(' ->', bucket)
+
+# List all objects in a container i.e. unique-container-test is your current Container
+bucket = s3.Bucket('unique-container-test')
+for obj in bucket.objects.all():
+    print(' ->', obj)
+
+# Download an S3 object i.e. test-file a file available in your unique-container-test Container
+s3.Bucket('unique-container-test').download_file('test-file', './test-file.txt')
+
+# Add an image to the bucket
+# bucket.put_object(Body=open('image.png', mode='rb'), Key='image.png')
+
+

We can also configure the Python Boto3 library to work with the saved AWS profile.

+
import boto3
+
+# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
+session = boto3.Session(profile_name='<YOUR_CONFIGURED_AWS_PROFILE_NAME>')
+
+# List all containers
+s3 = boto3.client('s3', endpoint_url='https://stack.nerc.mghpcc.org:13808',)
+response = s3.list_buckets()
+
+for bucket in response['Buckets']:
+    print(' ->', bucket)
+
+

b. The EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from the ec2rc.sh file can then be plugged into your application. See the example below using the Python Minio library, which connects to the S3 API interface using EC2 credentials and performs some basic operations on the available buckets and files that the user has access to.

+
from minio import Minio
+
+# Create client with access key and secret key.
+# https://docs.min.io/docs/python-client-api-reference.html
+client = Minio(
+    "stack.nerc.mghpcc.org:13808",
+    access_key='YOUR_EC2_ACCESS_KEY_FROM_ec2rc_FILE',
+    secret_key='YOUR_EC2_SECRET_KEY_FROM_ec2rc_FILE', #pragma: allowlist secret
+)
+
+# List all containers
+buckets = client.list_buckets()
+for bucket in buckets:
+    # print(bucket.name, bucket.creation_date)
+    print(' ->', bucket)
+
+# Make 'nerc-test-container' container if not exist.
+found = client.bucket_exists("nerc-test-container")
+if not found:
+    client.make_bucket("nerc-test-container")
+else:
+    print("Bucket 'nerc-test-container' already exists")
+
+# Upload './nerc-backup.zip' as object name 'nerc-backup-2022.zip'
+# to bucket 'nerc-test-container'.
+client.fput_object(
+    "nerc-test-container", "nerc-backup-2022.zip", "./nerc-backup.zip",
+)
+
+

3. Using Graphical User Interface (GUI) Tools

+

i. Using WinSCP

+

WinSCP is a popular and free open-source SFTP +client, SCP client, and FTP client for Windows. Its main function is file transfer +between a local and a remote computer, with some basic file management functionality +using FTP, FTPS, SCP, SFTP, WebDAV or S3 file transfer protocols.

+

Prerequisites:

+
    +
  • +

    WinSCP installed, see Download and Install the latest version of the WinSCP + for more information.

    +
  • +
  • +

    Go to WinSCP menu and open "Options > Preferences".

    +
  • +
  • +

    When the "Preferences" dialog window appears, select "Transfer" in the options + on the left pane.

    +
  • +
  • +

    Click on "Edit" button.

    +
  • +
  • +

    Then, in the popup dialog box, review the "Common options" group and uncheck the "Preserve timestamp" option as shown below:

    +
  • +
+

Disable Preserve TimeStamp

+

Configuring WinSCP

+
    +
  • Click on "New Session" tab button as shown below:
  • +
+

Login

+
    +
  • Select "Amazon S3" from the "File protocol" dropdown options as shown below:
  • +
+

Choose Amazon S3 File Protocol

+
    +
  • Provide the following required endpoint information:
  • +
+

"Host name": "stack.nerc.mghpcc.org"

+

"Port number": "13808"

+

The EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from ec2rc.sh +file can then be plugged into "Access key ID" and "Secret access key" +respectively.

+

Config WinSCP

+
+

Helpful Tips

+

You can save your above-configured session with a preferred name by clicking the "Save" button and then giving a proper name to your session, so that next time you don't need to manually enter all your configuration again.

+
+

Using WinSCP

+

You can follow the above steps to manually add a new session the next time you open WinSCP, or you can connect to a previously saved session (the popup dialog will show a list of all your saved session names) by just clicking on the session name.

+

Then click "Login" button to connect to your NERC project's Object Storage as +shown below:

+

Login

+

Successful connection

+

ii. Using Cyberduck

+

Cyberduck is a libre server and cloud storage browser for Mac and Windows. Its easy-to-use interface lets you connect to servers, enterprise file sharing, and cloud storage.

+

Prerequisites:

+ +

Configuring Cyberduck

+
    +
  • Click on "Open Connection" tab button as shown below:
  • +
+

Open Connection

+
    +
  • Select "Amazon S3" from the dropdown options as shown below:
  • +
+

Choose Amazon S3

+
    +
  • Provide the following required endpoint information:
  • +
+

"Server": "stack.nerc.mghpcc.org"

+

"Port": "13808"

+

The EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from the ec2rc.sh file can then be plugged into "Access key ID" and "Secret Access Key" respectively.

+

Cyberduck Amazon S3 Configuration

+

Using Cyberduck

+

Then click "Connect" button to connect to your NERC project's Object Storage as +shown below:

+

Successful connection

+
diff --git a/openstack/persistent-storage/transfer-a-volume/index.html b/openstack/persistent-storage/transfer-a-volume/index.html (new generated HTML page; site boilerplate omitted)

Transfer A Volume

+

You may wish to transfer a volume to a different project. Volumes are specific +to a project and can only be attached to one virtual machine at a time.

+
+

Important

+

The volume to be transferred must not be attached to an instance. You can verify this by looking at the volume's "Status" column, which needs to show "Available" rather than "In-use", and its "Attached To" column, which needs to be empty.

+
+

Using Horizon dashboard

+

Once you're logged in to NERC's Horizon dashboard.

+

Navigate to Project -> Volumes -> Volumes.

+

Select the volume that you want to transfer and then click the dropdown next to +the "Edit volume" and choose "Create Transfer".

+

Create Transfer of a Volume

+

Give the transfer a name.

+

Volume Transfer Popup

+

You will see a screen like shown below. Be sure to capture the Transfer ID and +the Authorization Key.

+

Volume Transfer Initiated

+
+

Important Note

+

You can always get the transfer ID later if needed, but there is no way to +retrieve the key.

+

If the key is lost before the transfer is completed, you will have to cancel +the pending transfer and create a new one.

+
+

Then the volume will show the status like below:

+

Volume Transfer Initiated

+

Assuming you have access to the receiving project, switch to it using the Project +dropdown at the top right.

+

If you don't have access to the receiving project, give the transfer ID and +Authorization Key to a collaborator who does, and have them complete the next steps.

+

In the receiving project, go to the Volumes tab, and click "Accept Transfer" +button as shown below:

+

Volumes in a New Project

+

Enter the "Transfer ID" and the "Authorization Key" that were captured when the +transfer was created in the previous project.

+

Volume Transfer Accepted

+

The volume should now appear in the Volumes list of the receiving project as shown +below:

+

Successful Accepted Volume Transfer

+
+

Important Note

+

Any pending transfers can be cancelled if they are not yet accepted, but there +is no way to "undo" a transfer once it is complete. +To send the volume back to the original project, a new transfer would be required.

+
+

Using the CLI

+

Prerequisites:

+

To run the OpenStack CLI commands, you need to have:

+ +

Using the openstack client

+
    +
  • Identifying volume to transfer in your source project
  • +
+

openstack volume list
+---------------------------+-----------+-----------+------+-------------+
| ID                        | Name      | Status    | Size | Attached to |
+---------------------------+-----------+-----------+------+-------------+
| d8a5da4c-...-8b6678ce4936 | my-volume | available | 100  |             |
+---------------------------+-----------+-----------+------+-------------+

+
    +
  • Create the transfer request
  • +
+

openstack volume transfer request create my-volume
+------------+--------------------------------------+
| Field      | Value                                |
+------------+--------------------------------------+
| auth_key   | b92d98fec2766582                     |
| created_at | 2024-02-04T14:30:08.362907           |
| id         | a16494cf-cfa0-47f6-b606-62573357922a |
| name       | None                                 |
| volume_id  | d8a5da4c-41c8-4c2d-b57a-8b6678ce4936 |
+------------+--------------------------------------+

+
+

Pro Tip

+

If your volume name includes spaces, you need to enclose it in quotes, i.e. "<VOLUME_NAME_OR_ID>". For example: openstack volume transfer request create "My Volume"

+
+
    +
  • The transfer status of the volume can be checked using openstack volume transfer request list as follows, and the volume shows the status awaiting-transfer when running openstack volume show <VOLUME_NAME_OR_ID>, as shown below:
  • +
+

openstack volume transfer request list
+---------------------------+------+--------------------------------------+
| ID                        | Name | Volume                               |
+---------------------------+------+--------------------------------------+
| a16494cf-...-62573357922a | None | d8a5da4c-41c8-4c2d-b57a-8b6678ce4936 |
+---------------------------+------+--------------------------------------+

+

openstack volume show my-volume
+------------------------------+--------------------------------------+
| Field                        | Value                                |
+------------------------------+--------------------------------------+
...
| name                         | my-volume                            |
...
| status                       | awaiting-transfer                    |
+------------------------------+--------------------------------------+

+
    +
  • The user of the destination project can authenticate and use the authorization key reported above. The transfer can then be accepted.
  • +
+

openstack volume transfer request accept --auth-key b92d98fec2766582 a16494cf-cfa0-47f6-b606-62573357922a
+-----------+--------------------------------------+
| Field     | Value                                |
+-----------+--------------------------------------+
| id        | a16494cf-cfa0-47f6-b606-62573357922a |
| name      | None                                 |
| volume_id | d8a5da4c-41c8-4c2d-b57a-8b6678ce4936 |
+-----------+--------------------------------------+

+
    +
  • And the results confirmed in the volume list for the destination project.
  • +
+

openstack volume list
+---------------------------+-----------+-----------+------+-------------+
| ID                        | Name      | Status    | Size | Attached to |
+---------------------------+-----------+-----------+------+-------------+
| d8a5da4c-...-8b6678ce4936 | my-volume | available | 100  |             |
+---------------------------+-----------+-----------+------+-------------+

+
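If you prefer to script these checks instead of running the CLI commands above, the same information is also available through the OpenStack Python SDK (openstacksdk). The sketch below is illustrative only: it assumes a clouds.yaml profile named openstack with your NERC credentials and simply lists volumes and their status (for example available, in-use, or awaiting-transfer); it does not create or accept transfers.

import openstack

# Assumes a clouds.yaml entry named "openstack"; adjust to your own setup.
conn = openstack.connect(cloud='openstack')

# Print each volume's name, ID and status visible to the current project.
for volume in conn.block_storage.volumes():
    print(volume.name, volume.id, volume.status)
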
diff --git a/openstack/persistent-storage/volumes/index.html b/openstack/persistent-storage/volumes/index.html (new generated HTML page; site boilerplate omitted)

Persistent Storage

+

Ephemeral disk

+

OpenStack offers two types of block storage: ephemeral storage and persistent volumes. +Ephemeral storage is available only during the instance's lifespan, persisting +across guest operating system reboots. However, once the instance is deleted, +its associated storage is also removed. The size of ephemeral storage is determined +by the virtual machine's flavor and remains constant for all virtual machines of +that flavor. The service level for ephemeral storage relies on the underlying hardware.

+

In its default configuration, when the instance is launched from an Image or +an Instance Snapshot, the choice for utilizing persistent storage is configured +by selecting the Yes option for "Create New Volume". Additionally, the "Delete +Volume on Instance Delete" setting is pre-set to No as shown below:

+

Instance Persistent Storage Option

+

If you set the "Create New Volume" option to No, the instance will boot +from either an image or a snapshot, with the instance only being attached to an +ephemeral disk. It's crucial to note that this configuration does NOT create +persistent block storage in the form of a Volume, which can pose risks. Consequently, +the disk of the instance won't appear in the "Volumes" list. To mitigate potential +data loss, we strongly recommend regularly taking a snapshot +of such a running ephemeral instance, referred to as an "instance snapshot", +especially if you want to safeguard or recover important states of your instance.

+
+

Very Important Note

+

Never use Ephemeral disk if you're setting up a production-level environment. +When the instance is deleted, its associated ephemeral storage is also removed.

+
+

Volumes

+

A volume is a detachable block storage device, similar to a USB hard drive. You +can attach a volume to only one instance.

+

Unlike Ephemeral disk, Volumes are the Block Storage devices that you attach to +instances to enable persistent storage. Users can attach a volume to a running +instance or detach a volume and attach it to another instance at any time.

+
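For those scripting against the OpenStack APIs, attaching and detaching a volume can also be done from Python. The sketch below uses the openstacksdk "cloud" layer and is illustrative only; the cloud profile name, server name, and volume name are hypothetical placeholders.

import openstack

# Hypothetical cloud profile, server and volume names for illustration.
conn = openstack.connect(cloud='openstack')
server = conn.get_server('my-vm')
volume = conn.get_volume('my-volume')

# Attach the volume to the running instance...
conn.attach_volume(server, volume)

# ...and later detach it so it can be attached to another instance.
conn.detach_volume(server, volume)
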

Ownership of a volume can be transferred to another project, as described here.

+

Some uses for volumes:

+
    +
  • +

    Persistent data storage for ephemeral instances.

    +
  • +
  • +

    Transfer of data between projects

    +
  • +
  • +

    Bootable image where disk changes persist

    +
  • +
  • +

    Mounting the disk of one instance to another for troubleshooting

    +
  • +
+

How do you make your VM setup and data persistent?

+
    +
  • By default, when the instance is launched from an Image or an + Instance Snapshot, the choice for utilizing persistent storage is configured + by selecting the Yes option for "Create New Volume". It's crucial to + note that this configuration automatically creates persistent block storage + in the form of a Volume instead of using Ephemeral disk, which appears in + the "Volumes" list in the Horizon dashboard: Project -> Volumes -> Volumes.
  • +
+

Instance Persistent Storage Option

+
    +
  • By default, the setting for "Delete Volume on Instance Delete" is configured + to use No. This setting ensures that the volume created during the launch + of a virtual machine remains persistent and won't be deleted alongside the + instance unless explicitly chosen as "Yes". Such instances boot from a + bootable volume, utilizing an existing volume listed in the + Project -> Volumes -> Volumes menu.
  • +
+

To minimize the risk of potential data loss, we highly recommend consistently +creating backups through snapshots. +You can opt for a "volume snapshot" if you only need to capture the volume's +data. However, if your VM involves extended running processes and vital +in-memory data, preserving the precise VM state is essential. In such cases, +we recommend regularly taking a snapshot of the entire instance, known as an +"instance snapshot", provided you have sufficient Volume Storage quotas, +specifically the "OpenStack Volume Quota (GiB)" allocated for your resource allocation. +Please ensure that your allocation includes sufficient quota for the "OpenStack +Number of Volumes Quota" to allow for the creation of additional volumes based on +your quota attributes. Utilizing snapshots for backups is of utmost importance, +particularly when safeguarding or recovering critical states and data from your +instance.

+
+

Very Important: Requested/Approved Allocated Storage Quota and Cost

+

When you delete virtual machines +backed by persistent volumes, the disk data is retained, continuing to consume +approved storage resources for which you will still be billed. It's important +to note that the Storage quotas for NERC (OpenStack) Resource Allocations, +are specified by the "OpenStack Volume Quota (GiB)" and "OpenStack Swift Quota +(GiB)" allocation attributes. Storage cost is determined by +your requested and approved allocation values +to reserve storage from the total NESE storage pool.

+

If you request additional storage by specifying a changed quota value for +the "OpenStack Volume Quota (GiB)" and "OpenStack Swift Quota (GiB)" +allocation attributes through NERC's ColdFront interface, +invoicing for the extra storage will take place upon fulfillment or approval +of your request, as explained in our +Billing FAQs.

+

Conversely, if you request a reduction in the Storage quotas by specifying +a reduced quota value for the "OpenStack Volume Quota (GiB)" and "OpenStack Swift +Quota in Gigabytes" allocation attributes through a change request using ColdFront, +your invoicing will be adjusted accordingly when the request is submitted.

+

In both scenarios, 'invoicing' refers to the accumulation of hours +corresponding to the added or removed storage quantity.

+
+
+

Help Regarding Billing

+

Please send your questions or concerns regarding Storage and Cost by emailing +us at help@nerc.mghpcc.org +or, by submitting a new ticket at the NERC's Support Ticketing System.

+
+
diff --git a/other-tools/CI-CD/CI-CD-pipeline/index.html b/other-tools/CI-CD/CI-CD-pipeline/index.html (new generated HTML page; site boilerplate omitted)

What is Continuous Integration/Continuous Delivery (CI/CD) Pipeline?

+

A Continuous Integration/Continuous Delivery (CI/CD) pipeline involves a series of steps that are performed in order to deliver a new version of an application. CI/CD pipelines are a practice focused on improving software delivery using automation.

+

Components of a CI/CD pipeline

+

The steps that form a CI/CD pipeline are distinct subsets of tasks that are +grouped into a pipeline stage. Typical pipeline stages include:

+
    +
  • +

    Build - The stage where the application is compiled.

    +
  • +
  • +

    Test - The stage where code is tested. Automation here can save both time + and effort.

    +
  • +
  • +

    Release - The stage where the application is delivered to the central repository.

    +
  • +
  • +

    Deploy - In this stage, code is deployed to the production environment.

    +
  • +
  • +

    Validation and compliance - The steps to validate a build are determined by the needs of your organization. Image security scanning, security scanning, and code analysis of applications ensure the quality of the images and of the written application code.

    +
  • +
+

CI/CD Pipeline Stages +Figure: CI/CD Pipeline Stages

+
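To make the idea of ordered stages concrete, here is a small, purely illustrative Python sketch of a pipeline runner: each stage is just a placeholder shell command, the stages run in order, and a failing stage stops the pipeline, which is essentially what CI/CD tools automate (and enrich with triggers, logs, and artifacts) for you.

import subprocess

# Placeholder commands; a real pipeline would run compilers, test suites,
# image builds, deployment tools, and security/code scanners here.
stages = [
    ("Build", ["echo", "compiling the application"]),
    ("Test", ["echo", "running automated tests"]),
    ("Release", ["echo", "publishing the artifact to a central repository"]),
    ("Deploy", ["echo", "deploying to the production environment"]),
    ("Validation and compliance", ["echo", "scanning images and code"]),
]

for name, command in stages:
    print(f"--- {name} ---")
    result = subprocess.run(command)
    if result.returncode != 0:
        # A failing stage stops the pipeline, just like in a real CI/CD tool.
        print(f"Stage '{name}' failed; aborting the pipeline.")
        break
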
(new binary image files under other-tools/CI-CD/github-actions/images/ omitted)
diff --git a/other-tools/CI-CD/github-actions/setup-github-actions-pipeline/index.html b/other-tools/CI-CD/github-actions/setup-github-actions-pipeline/index.html (new generated HTML page; site boilerplate omitted)

How to setup GitHub Actions Pipeline

+

GitHub Actions gives you the ability to +create workflows to automate the deployment process to OpenShift. GitHub Actions +makes it easy to automate all your CI/CD workflows.

+

Terminology

+

GitHub Actions Terminology

+

Workflow

+

Automation-as-code that you can set up in your repository.

+

Events

+

30+ workflow triggers, including on schedule and from external systems.

+

Actions

+

Community-powered units of work that you can use as steps to create a job in a +workflow.

+

Deploy an Application to your NERC OpenShift Project

+
    +
  • +

    Prerequisites

    +

    You must have at least one active NERC-OCP (OpenShift) type resource allocation. +You can refer to this documentation +on how to get allocation and request "NERC-OCP (OpenShift)" type resource allocations.

    +
  • +
+

Steps

+
    +
  1. +

    Access to the NERC's OpenShift Container Platform at https://console.apps.shift.nerc.mghpcc.org + as described here. + To get access to NERC's OCP web console you need to be part of ColdFront's active + allocation.

    +
  2. +
  3. +

    Setup the OpenShift CLI Tools locally and configure the OpenShift CLI to + enable oc commands. Refer to this user guide.

    +
  4. +
  5. +

    Setup Github CLI on your local machine as described here + and verify you are able to run gh commands as shown below:

    +

    GitHub CLI Setup

    +
  6. +
  7. +

    Fork the simple-node-app App in your own Github:

    +

    This application runs a simple node.js server and serves up some static routes with some static responses. This demo shows how a simple container-based app can easily be bootstrapped onto your NERC OpenShift project space.

    +
    +

    Very Important Information

    +

    As you won't have full access to this repository, +we recommend first forking the repository on your own GitHub account. So, +you'll need to update all references to https://github.com/nerc-project/simple-node-app.git +to point to your own forked repository.

    +
    +

    To create a fork of the example simple-node-app repository:

    +
      +
    1. +

      Go to https://github.com/nerc-project/simple-node-app.

      +
    2. +
    3. +

      Cick the "Fork" button to create a fork in your own GitHub account, e.g. "https://github.com/<github_username>/simple-node-app".

      +
    4. +
    +
  8. +
  9. +

    Clone the simple-node-app git repository:

    +

    git clone https://github.com/<github_username>/simple-node-app.git
    cd simple-node-app

    +
  10. +
  11. +

    Run either the setsecret.cmd file if you are using Windows or the setsecret.sh file if you are using a Linux-based machine. Once executed, verify the GitHub Secrets are set properly under your GitHub repo's Settings >> Secrets and variables >> Actions as shown here:

    +

    GitHub Secrets

    +
  12. +
  13. +

    Enable and Update GitHub Actions Pipeline on your own forked repo:

    +
      +
    • +

      Enable the OpenShift Workflow in the Actions tab of in your GitHub repository.

      +
    • +
    • +

      Update the provided sample OpenShift workflow YAML file i.e. openshift.yml, + which is located at "https://github.com/<github_username>/simple-node-app/actions/workflows/openshift.yml".

      +
      +

      Very Important Information

      +

      Workflow execution on OpenShift pipelines follows these steps:

      +
        +
      1. Checkout your repository
      2. +
      3. Perform a container image build
      4. +
      5. Push the built image to the GitHub Container Registry (GHCR) or +your preferred Registry
      6. +
      7. Log in to your NERC OpenShift cluster's project space
      8. +
      9. Create an OpenShift app from the image and expose it to the internet
      10. +
      +
      +
    • +
    +
  14. +
  15. +

    Edit the top-level 'env' section as marked with '🖊️' if the defaults are not + suitable for your project.

    +
  16. +
  17. +

    (Optional) Edit the build-image step to build your project:

    +

    The default build type uses a Dockerfile at the root of the repository, +but can be replaced with a different file, a source-to-image build, or a step-by-step +buildah build.

    +
  18. +
  19. +

    Commit and push the workflow file to your default branch to trigger a workflow + run as shown below:

    +

    GitHub Actions Successfully Complete

    +
  20. +
  21. +

    Verify that you can see the newly deployed application on the NERC's OpenShift + Container Platform at https://console.apps.shift.nerc.mghpcc.org + as described here, + and ensure that it can be browsed properly.

    +

    Application Deployed on NERC OCP

    +
  22. +
+

That's it! Every time you commit changes to your GitHub repo, GitHub Actions +will trigger your configured Pipeline, which will ultimately deploy your +application to your own NERC OpenShift Project.

+

Successfully Deployed Application

+
(new binary image files under other-tools/CI-CD/images/ and other-tools/CI-CD/jenkins/images/ omitted)
diff --git a/other-tools/CI-CD/jenkins/integrate-your-GitHub-repository/index.html b/other-tools/CI-CD/jenkins/integrate-your-GitHub-repository/index.html (new generated HTML page; site boilerplate omitted)

How to Integrate Your GitHub Repository to Your Jenkins Project

+

This explains how to add a GitHub Webhook in your Jenkins Pipeline that saves your +time and keeps your project updated all the time.

+
+

Prerequisite

+

You need to have setup CI/CD Pipelines on NERC's OpenStack by following +this document.

+
+

What is a webhook?

+

A webhook is an HTTP callback: an HTTP POST that is sent as a simple event notification when something happens. GitHub provides its own webhook options for such tasks.

+
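For illustration only, the Python snippet below mimics, in a heavily simplified form, the HTTP POST that such an event notification involves, aimed at the Jenkins webhook URL configured later in this guide. Real GitHub payloads are much richer and Jenkins may ignore or reject a hand-crafted request like this, so treat it purely as a way to visualize the mechanism, not as a replacement for the GitHub-side configuration.

import requests

# Hypothetical Jenkins webhook endpoint; replace <Floating-IP> with your own.
url = "http://<Floating-IP>:8080/github-webhook/"

# A trimmed, made-up payload; real GitHub "push" payloads carry far more data.
payload = {"ref": "refs/heads/main", "repository": {"full_name": "you/nodeapp"}}
headers = {"X-GitHub-Event": "push"}

response = requests.post(url, json=payload, headers=headers)
print(response.status_code)
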

Configuring GitHub

+

Let's see how to configure and add a webhook in GitHub:

+
    +
  1. +

    Go to your forked GitHub project repository.

    +
  2. +
  3. +

    Click on "Settings". in the right corner as shown below:

    +

    GitHub Settings

    +
  4. +
  5. +

    Click on "Webhooks" and then "Click "Add webhooks".

    +

    Github webhook

    +
  6. +
  7. +

    In the "Payload URL" field paste your Jenkins environment URL. At the end of this + URL add /github-webhook/ using http://<Floating-IP>:8080/github-webhook/ + i.e. http://199.94.60.4:8080/github-webhook/. + Select "Content type" as "application/json" and leave the "Secret" field empty.

    +

    Github webhook fields

    +
  8. +
  9. +

    In the page "Which events would you like to trigger this webhook?" select the + option "Let me select individual events". Then, check "Pull Requests" and "Pushes". + At the end of this option, make sure that the "Active" option is checked and + then click on "Add webhook" button.

    +

    Github webhook events

    +
  10. +
+

We're done with the configuration on GitHub's side! Now let's configure the Jenkins side to use this webhook.

+

That's it! In this way we can add a webhook to our job and ensure that every time you commit changes to your GitHub repo, GitHub will trigger your new Jenkins job, since we already set up the "GitHub hook trigger for GITScm polling" option for our Jenkins pipeline previously.

+
diff --git a/other-tools/CI-CD/jenkins/setup-jenkins-CI-CD-pipeline/index.html b/other-tools/CI-CD/jenkins/setup-jenkins-CI-CD-pipeline/index.html (new generated HTML page; site boilerplate omitted)

How to Set Up Jenkins Pipeline on a VM

+

This document will walk you through how to setup a minimal "CI/CD Pipeline To Deploy +To Kubernetes Cluster Using a CI/CD tool called Jenkins" on your NERC's OpenStack +environment. Jenkins uses the Kubernetes control plane on K8s Cluster to run pipeline +tasks that enable DevOps to spend more time coding and testing and less time +troubleshooting.

+
+

Prerequisite

+

You need a Kubernetes cluster running in your OpenStack environment. To set up your K8s cluster, please read this.

+
+

CI/CD Pipeline on NERC +Figure: CI/CD Pipeline To Deploy To Kubernetes Cluster Using Jenkins on NERC

+

Setup a Jenkins Server VM

+
    +
  • +

    Launch 1 Linux machine based on ubuntu-20.04-x86_64 and cpu-su.2 flavor with + 2vCPU, 8GB RAM, and 20GB storage.

    +
  • +
  • +

    Make sure you have added rules in the + Security Groups + to allow ssh using Port 22 access to the instance.

    +
  • +
  • +

    Setup a new Security Group with the following rules exposing port 8080 and + attach it to your new instance.

    +

    Jenkins Server Security Group

    +
  • +
  • +

    Assign a Floating IP + to your new instance so that you will be able to ssh into this machine:

    +
    ssh ubuntu@<Floating-IP> -A -i <Path_To_Your_Private_Key>
    +
    +

    For example:

    +
    ssh ubuntu@199.94.60.4 -A -i cloud.key
    +
    +
  • +
+

Upon successfully accessing the machine via SSH, install the following dependencies:

+
+

Very Important

+

Run the following steps as a non-root user, i.e. ubuntu.

+
+
    +
  • +

    Update the repositories and packages:

    +
    sudo apt-get update && sudo apt-get upgrade -y
    +
    +
  • +
  • +

    Turn off swap

    +
    sudo swapoff -a
    +sudo sed -i '/ swap / s/^/#/' /etc/fstab
    +
    +
  • +
  • +

    Install curl and apt-transport-https

    +
    sudo apt-get update && sudo apt-get install -y apt-transport-https curl
    +
    +
  • +
+
+

Download and install the latest version of Docker CE

+
    +
  • +

    Download and install Docker CE:

    +
    curl -fsSL https://get.docker.com -o get-docker.sh
    +sudo sh get-docker.sh
    +
    +
  • +
  • +

    Configure the Docker daemon:

    +
    sudo usermod -aG docker $USER && newgrp docker
    +
    +
  • +
+
+

Install kubectl

+

kubectl: the command line util to talk to your cluster.

+
    +
  • +

    Download the Google Cloud public signing key and add key to verify releases

    +
    curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo \
    +  apt-key add -
    +
    +
  • +
  • +

    add kubernetes apt repo

    +
    cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
    +deb https://apt.kubernetes.io/ kubernetes-xenial main
    +EOF
    +
    +
  • +
  • +

    Install kubectl

    +
    sudo apt-get update
    +sudo apt-get install -y kubectl
    +
    +
  • +
  • +

    apt-mark hold is used so that these packages will not be updated/removed automatically

    +
    sudo apt-mark hold kubectl
    +
    +
  • +
+
+

Install a Jenkins Server using Docker

+

To install a Jenkins server using Docker run the following command:

+
docker run -u 0 --privileged --name jenkins -it -d -p 8080:8080 -p 50000:50000 \
+    -v /var/run/docker.sock:/var/run/docker.sock \
+    -v $(which docker):/usr/bin/docker \
+    -v $(which kubectl):/usr/bin/kubectl \
+    -v /home/jenkins_home:/var/jenkins_home \
+    jenkins/jenkins:latest
+
+

Once the docker run command completes successfully, browse to http://<Floating-IP>:8080. This will show you where to get the initial Administrator password to get started, i.e. /var/jenkins_home/secrets/initialAdminPassword, as shown below:

+

Jenkins Successfully Installed

+

The /var/jenkins_home directory in the Jenkins docker container is a volume mounted from the host's /home/jenkins_home, so you can just browse to /home/jenkins_home/secrets/initialAdminPassword on the host machine you ssh'ed into to copy the same content as /var/jenkins_home/secrets/initialAdminPassword.

+
+

Initial Admin Password

+

If you can't find the Admin password at /var/jenkins_home/secrets/initialAdminPassword, +then try to locate it at its original location, i.e. /home/jenkins_home/secrets/initialAdminPassword.

+
+

OR, you can run docker ps on worker node where you run the Jenkins server. +Note the Name of the docker container and then run: docker logs -f <jenkins_docker_container_name>. +This will show the initial Administrator password on the terminal which you can +copy and paste on the web GUI on the browser.

+
+

Initial Admin Password

+

When you run docker logs -f <jenkins_docker_container_name>, the initial +password for the "Admin" user can be found between the rows of asterisks +as shown below: +Initial Admin Password

+
+
    +
  • +

    Once you login to the Jenkins Web UI by entering the admin password shown on + CLI terminal, click on the "Install suggested plugins" button as shown below:

    +

    Install Customize Jenkins Plugins

    +

    Customize Jenkins Installing Plugins

    +

    Continue by selecting 'Skip and continue as admin' first as shown below:

    +

    Skip and continue as admin

    +

    Then click the 'Save and Finish' button as shown below and then, Jenkins is ready +to use.

    +

    Jenkins Get Started

    +
  • +
+

Install the required Plugins

+
    +
  • +

    Jenkins has a wide range of plugin options. From your Jenkins dashboard navigate + to "Manage Jenkins > Manage Plugins" as shown below:

    +

    Jenkins Plugin Installation

    +

    Select the "Available" tab and then locate Docker pipeline by searching +and then click "Install without restart" button as shown below:

    +

    Jenkins Required Plugin To Install

    +

    Also, install the Kubernetes CLI plugin that allows you to configure kubectl +commands on Jenkinsfile to interact with Kubernetes clusters as shown below:

    +

    Install Kubernetes CLI

    +
  • +
+

Create the required Credentials

+
    +
  • +

    Create a global credential for your Docker Hub Registry by providing the username + and password that will be used by the Jenkins pipelines:

    +
      +
    1. +

      Click on the "Manage Jenkins" menu and then click on the "Manage Credentials" + link as shown below:

      +

      Manage Credentials

      +
    2. +
    3. +

      Click on Jenkins Store as shown below:

      +

      Jenkins Store

      +
    4. +
    5. +

      The credentials can be added by clicking the 'Add Credentials' button in + the left pane.

      +

      Adding Credentials

      +
    6. +
    +
  • +
  • +

    First, add the 'DockerHub' credentials as 'Username with password' with the + ID dockerhublogin.

    +

    a. Select the Kind "Username with password" from the dropdown options.

    +

    b. Provide your Docker Hub Registry's username and password.

    +

    c. Give it an ID and a short description. The ID is very important, as it will need to be specified in your Jenkinsfile, i.e. dockerhublogin.

    +

    Docker Hub Credentials

    +
  • +
  • +

    Configure the 'Kubeconfig' credential as a 'Secret file' that holds the kubeconfig file from the K8s master, i.e. located at /etc/kubernetes/admin.conf, with the ID 'kubernetes'.

    +

    a. Click on the "Add Credentials" button in the left pane.

    +

    b. Select the Kind "Secret file" from the dropdown options.

    +

    c. In the File section, choose a config file that contains the EXACT content of your K8s master's kubeconfig file located at /etc/kubernetes/admin.conf.

    +

    d. Give an ID and a description that you will use in your Jenkinsfile, i.e. kubernetes.

    +

    Kubernetes Configuration Credentials

    +

    e. Once both credentials are successfully added the following credentials are +shown:

    +

    Jenkins All Credentials

    +
  • +
+

Fork the nodeapp App in your own Github

+
+

Very Important Information

+

As you won't have full access to this repository, +we recommend first forking the repository on your own GitHub account. So, you'll +need to update all references to https://github.com/nerc-project/nodeapp.git +to point to your own forked repository.

+
+

To create a fork of the example nodeapp repository:

+
    +
  1. +

    Go to https://github.com/nerc-project/nodeapp.

    +
  2. +
  3. +

    Cick the "Fork" button to create a fork in your own GitHub account, e.g. "https://github.com/<github_username>/nodeapp".

    +
  4. +
  5. +

    Review the "Jenkinsfile" that is included at the root of the forked git repo.

    +
    +

    Very Important Information

    +

    A sample Jenkinsfile is available at the root of our demo application's Git +repository, which we can reference in our Jenkins pipeline steps. For example, +in this case, we are using this repository +where our demo Node.js application resides.

    +
    +
  6. +
+

Modify the Jenkins Declarative Pipeline Script file

+
    +
  • +

    Modify the provided ‘Jenkinsfile’ to specify your own Docker Hub account + and github repository as specified in "<dockerhub_username>" and "<github_username>".

    +
    +

    Very Important Information

    +

    You need to replace "<dockerhub_username>" and "<github_username>" +with your actual DockerHub and GitHub usernames, respectively. Also, +ensure that the global credentials IDs mentioned above match those used +during the credential saving steps mentioned earlier. For instance, +dockerhublogin corresponds to the DockerHub ID saved during the +credential saving process for your Docker Hub Registry's username and +password. Similarly, kubernetes corresponds to the 'Kubeconfig' ID +assigned for the Kubeconfig credential file.

    +
    +
  • +
  • +

    Below is an example of a Jenkins declarative Pipeline Script file:

    +

    pipeline {

    +
    environment {
    +  dockerimagename = "<dockerhub_username>/nodeapp:${env.BUILD_NUMBER}"
    +  dockerImage = ""
    +}
    +
    +agent any
    +
    +stages {
    +
    +  stage('Checkout Source') {
    +    steps {
    +      git branch: 'main', url: 'https://github.com/<github_username>/nodeapp.git'
    +    }
    +  }
    +
    +  stage('Build image') {
    +    steps{
    +      script {
    +        dockerImage = docker.build dockerimagename
    +      }
    +    }
    +  }
    +
    +  stage('Pushing Image') {
    +    environment {
    +      registryCredential = 'dockerhublogin'
    +    }
    +    steps{
    +      script {
    +        docker.withRegistry('https://registry.hub.docker.com', registryCredential){
    +          dockerImage.push()
    +        }
    +      }
    +    }
    +  }
    +
    +  stage('Docker Remove Image') {
    +    steps {
    +      sh "docker rmi -f ${dockerimagename}"
    +      sh "docker rmi -f registry.hub.docker.com/${dockerimagename}"
    +    }
    +  }
    +
    +  stage('Deploying App to Kubernetes') {
    +    steps {
    +      sh "sed -i 's/nodeapp:latest/nodeapp:${env.BUILD_NUMBER}/g' deploymentservice.yml"
    +      withKubeConfig([credentialsId: 'kubernetes']) {
    +        sh 'kubectl apply -f deploymentservice.yml'
    +      }
    +    }
    +  }
    +}
    +
    +

    }

    +
    +

    Other way to Generate Pipeline Jenkinsfile

    +

    You can generate your custom Jenkinsfile by clicking the "Pipeline Syntax" link that is shown when you create a new Pipeline via the "New Item" menu link.

    +
    +
  • +
+

Setup a Pipeline

+
    +
  • +

    Once you review the provided Jenkinsfile and understand the stages, + you can now create a pipeline to trigger it on your newly setup Jenkins server:

    +

    a. Click on the "New Item" link.

    +

    b. Select the "Pipeline" link.

    +

    c. Give a name to your Pipeline, i.e. "jenkins-k8s-pipeline".

    +

    Adding Jenkins Credentials

    +

    d. Select "Build Triggers" tab and then select +Github hook tirgger for GITScm polling as shown below:

    +

    Adding Github Build Trigger

    +

    e. Select "Pipeline" tab and then select the "Pipeline script from SCM" from +the dropdown options. Then you need to specify the Git as SCM and also "Repository +URL" for your public git repo and also specify your branch and Jenkinsfile's +name as shown below:

    +

    Add Jenkins Pipeline Script From Git

    +

    OR, you can copy/paste the contents of your Jenkinsfile into the given textbox. Please make sure you select "Pipeline script" from the dropdown options.

    +

    Add Jenkins Pipeline Script Content

    +

    f. Click on "Save" button.

    +
  • +
+

How to manually Trigger the Pipeline

+
    +
  • +

    Finally, click on the "Build Now" menu link on right side navigation that + will triggers the Pipeline process i.e. Build docker image, Push Image to your + Docker Hub Registry and Pull the image from Docker Registry, Remove local Docker + images and then Deploy to K8s Cluster as shown below:

    +

    Jenkins Pipeline Build Now

    +

    Once you see that the deployment to your K8s Cluster is successful, you can browse the output using http://<Floating-IP>:<NodePort> as shown below:

    +

    K8s Deployed Node App

    +

    You can see the Console Output logs of this pipeline process by clicking the +icon before the id of the started Pipeline on the right bottom corner.

    +

    Jenkins console

    +

    The pipeline stages after successful completion look like below:

    +

    Jenkins Pipeline Stages Run Successful

    +
  • +
+

We will continue in the next documentation with how to set up a GitHub Webhook in your Jenkins Pipeline so that Jenkins triggers the build when a developer commits code to a specific branch of your GitHub repository.

+
(new binary image files under other-tools/apache-spark/images/ omitted)
diff --git a/other-tools/apache-spark/spark/index.html b/other-tools/apache-spark/spark/index.html (new generated HTML page; site boilerplate omitted)

Apache Spark Cluster Setup on NERC OpenStack

+

Apache Spark Overview

+

Apache Spark is increasingly recognized as the primary analysis suite for big data, particularly among Python users. Spark offers a robust Python API and includes several valuable built-in libraries such as MLlib for machine learning and Spark Streaming for real-time analysis. In contrast to Apache Hadoop, Spark performs most computations in main memory, boosting performance.

+

Many modern computational tasks utilize the MapReduce parallel paradigm. This computational process comprises two stages: Map and Reduce. Before task execution, all data is distributed across the nodes of the cluster. During the "Map" stage, the master node dispatches the executable task to the other nodes, and each worker processes its respective data. The subsequent step is "Reduce", which involves the master node collecting all results from the workers and generating final results based on the workers' outcomes. Apache Spark also implements this model of computation, which gives it its big data processing abilities.

+
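As a concrete, heavily simplified illustration of the Map and Reduce stages, the PySpark sketch below runs locally (master local[*]) and counts words: the map steps emit (word, 1) pairs, and the reduce step aggregates them into totals. It assumes only that the pyspark package (and Java) is installed; it is not tied to the cluster built later in this guide.

from pyspark.sql import SparkSession

# local[*] runs Spark inside this single machine, which is enough to
# illustrate the MapReduce flow without a cluster.
spark = SparkSession.builder.master("local[*]").appName("wordcount-demo").getOrCreate()
sc = spark.sparkContext

lines = sc.parallelize(["big data with spark", "spark runs in memory"])

counts = (
    lines.flatMap(lambda line: line.split())   # Map: split lines into words
         .map(lambda word: (word, 1))          # Map: emit (word, 1) pairs
         .reduceByKey(lambda a, b: a + b)      # Reduce: sum the counts per word
)

print(counts.collect())
spark.stop()
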

Apache Spark Cluster Setup

+

To get a Spark standalone cluster up and running manually, all you need to do is +spawn some VMs and start Spark as master on one of them and worker on the others. +They will automatically form a cluster that you can connect to/from Python, Java, +and Scala applications using the IP address of the master VM.

+
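For example, once the cluster built in the following sections is up, a Python application could connect to it roughly as sketched below. This assumes pyspark is installed on the client, that the standalone master is reachable on its default port 7077, and that <Master-IP> is replaced with the master VM's address.

from pyspark.sql import SparkSession

# spark:// URL of the standalone master; 7077 is Spark's default master port.
spark = (
    SparkSession.builder
    .master("spark://<Master-IP>:7077")
    .appName("nerc-spark-demo")
    .getOrCreate()
)

# A trivial distributed computation to confirm the executors are reachable.
print(spark.sparkContext.parallelize(range(1000)).sum())

spark.stop()
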

Setup a Master VM

+
    +
  • +

    To create a master VM for the first time, ensure that the "Image" dropdown option + is selected. In this example, we selected ubuntu-22.04-x86_64 and the cpu-su.2 + flavor is being used.

    +
  • +
  • +

    Make sure you have added rules in the + Security Groups + to allow ssh using Port 22 access to the instance.

    +
  • +
  • +

    Assign a Floating IP + to your new instance so that you will be able to ssh into this machine:

    +
    ssh ubuntu@<Floating-IP> -A -i <Path_To_Your_Private_Key>
    +
    +

    For example:

    +
    ssh ubuntu@199.94.61.4 -A -i cloud.key
    +
    +
  • +
  • +

    Upon successfully accessing the machine, install the following dependencies:

    +
    sudo apt-get -y update
    +sudo apt install default-jre -y
    +
    +
  • +
  • +

    Download and install Scala:

    +
    wget https://downloads.lightbend.com/scala/2.13.10/scala-2.13.10.deb
    +sudo dpkg -i scala-2.13.10.deb
    +sudo apt-get install scala
    +
    +
    +

    Note

    +

    Installing Scala means installing various command-line tools such as the +Scala compiler and build tools.

    +
    +
  • +
  • +

    Download and unpack Apache Spark:

    +
    SPARK_VERSION="3.4.2"
    +APACHE_MIRROR="dlcdn.apache.org"
    +
    +wget https://$APACHE_MIRROR/spark/spark-$SPARK_VERSION/spark-$SPARK_VERSION-bin-hadoop3-scala2.13.tgz
    +sudo tar -zxvf spark-$SPARK_VERSION-bin-hadoop3-scala2.13.tgz
    +sudo cp -far spark-$SPARK_VERSION-bin-hadoop3-scala2.13 /usr/local/spark
    +
    +
    +

    Very Important Note

    +

    Please ensure you are using the latest Spark version by modifying the +SPARK_VERSION in the above script. Additionally, verify that the version +exists on the APACHE_MIRROR website. Please note the value of SPARK_VERSION +as you will need it during Preparing Jobs for Execution and Examination.

    +
    +
  • +
  • +

    Create an SSH/RSA Key by running ssh-keygen -t rsa without using any passphrase:

    +
    ssh-keygen -t rsa
    +
    +Generating public/private rsa key pair.
    +Enter file in which to save the key (/home/ubuntu/.ssh/id_rsa):
    +Enter passphrase (empty for no passphrase):
    +Enter same passphrase again:
    +Your identification has been saved in /home/ubuntu/.ssh/id_rsa
    +Your public key has been saved in /home/ubuntu/.ssh/id_rsa.pub
    +The key fingerprint is:
    +SHA256:8i/TVSCfrkdV4+Jyqc00RoZZFSHNj8C0QugmBa7RX7U ubuntu@spark-master
    +The key's randomart image is:
    ++---[RSA 3072]----+
    +|      .. ..o..++o|
    +|     o  o.. +o.+.|
    +|    . +o  .o=+.oo|
    +|     +.oo  +o++..|
    +|    o EoS  .+oo  |
    +|     . o   .+B   |
    +|        .. +O .  |
    +|        o.o..o   |
    +|         o..     |
    ++----[SHA256]-----+
    +
    +
  • +
  • +

    Copy and append the contents of SSH public key i.e. ~/.ssh/id_rsa.pub to + the ~/.ssh/authorized_keys file.

    +
  • +
+

Create a Volume Snapshot of the master VM

+
    +
  • +

    Once you're logged in to NERC's Horizon dashboard, you need to Shut Off the master VM before creating a volume snapshot.

    +

    Click Action -> Shut Off Instance.

    +

    Status will change to Shutoff.

    +
  • +
  • +

    Then, create a snapshot of its attached volume by clicking on the "Create snapshot" + from the Project -> Volumes -> Volumes as described here.

    +
  • +
+

Create Two Worker Instances from the Volume Snapshot

+
    +
  • +

    Once a snapshot is created and is in "Available" status, you can view and manage + it under the Volumes menu in the Horizon dashboard under Volume Snapshots.

    +

    Navigate to Project -> Volumes -> Snapshots.

    +
  • +
  • +

    You have the option to directly launch this volume as an instance by clicking + on the arrow next to "Create Volume" and selecting "Launch as Instance".

    +

    NOTE: Specify Count: 2 to launch 2 instances using the volume snapshot +as shown below:

    +

    Launch 2 Workers From Volume Snapshot

    +
    +

    Naming, Security Group and Flavor for Worker Nodes

    +

    You can specify the "Instance Name" as "spark-worker", and for each instance, +it will automatically append incremental values at the end, such as +spark-worker-1 and spark-worker-2. Also, make sure you have attached +the Security Groups +to allow ssh using Port 22 access to the worker instances.

    +
    +
  • +
+

Additionally, during launch, you will have the option to choose your preferred +flavor for the worker nodes, which can differ from the master VM based on your +computational requirements.

+
    +
  • +

    Navigate to Project -> Compute -> Instances.

    +
  • +
  • +

    Restart the shut-off master VM by clicking Action -> Start Instance.

    +
  • +
  • +

    The final setup for our Spark cluster looks like this, with 1 master node and 2 worker nodes:

    +

    Spark Cluster VMs

    +
  • +
+

Configure Spark on the Master VM

+
    +
  • +

    SSH login into the master VM again.

    +
  • +
  • +

    Update the /etc/hosts file to specify all three hostnames with their corresponding + internal IP addresses.

    +
    sudo nano /etc/hosts
    +
    +

    Ensure all hosts are resolvable by adding them to /etc/hosts. You can modify the following content, specifying each VM's internal IP address, and paste the updated content at the end of the /etc/hosts file. Alternatively, you can append the content directly to the end of the /etc/hosts file from the command line, as sketched below the note.

    +
    <Master-Internal-IP> master
    +<Worker1-Internal-IP> worker1
    +<Worker2-Internal-IP> worker2
    +
    +
    +

    Very Important Note

    +

    Make sure to use >> instead of > to avoid overwriting the existing content +and append the new content at the end of the file.

    +
    +
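    A minimal sketch of appending those entries non-interactively (substitute the real internal IPs; tee -a appends like >>, whereas omitting -a would overwrite the file):

    echo "<Master-Internal-IP> master" | sudo tee -a /etc/hosts
    echo "<Worker1-Internal-IP> worker1" | sudo tee -a /etc/hosts
    echo "<Worker2-Internal-IP> worker2" | sudo tee -a /etc/hosts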

    For example, the end of the /etc/hosts file looks like this:

    +
    sudo cat /etc/hosts
    +...
    +192.168.0.46 master
    +192.168.0.26 worker1
    +192.168.0.136 worker2
    +
    +
  • +
  • +

    Verify that you can SSH into both worker nodes by using ssh worker1 and + ssh worker2 from the Spark master node's terminal.

    +
  • +
  • +

    Copy the sample Spark configuration file:

    +
    cd /usr/local/spark/conf/
    +cp spark-env.sh.template spark-env.sh
    +
    +
  • +
  • +

    Update the environment variables file i.e. spark-env.sh to include the following + information:

    +
    export SPARK_MASTER_HOST='<Master-Internal-IP>'
    +export JAVA_HOME=<Path_of_JAVA_installation>
    +
    +
    +

    Environment Variables

    +

    Executing the command readlink -f $(which java) will display the path to the current Java setup on your VM, for example /usr/lib/jvm/java-11-openjdk-amd64/bin/java. Remove the trailing bin/java part, i.e. use /usr/lib/jvm/java-11-openjdk-amd64, as the value of the JAVA_HOME environment variable. Learn more about other Spark settings that can be configured through environment variables here.

    +
    +

    For example:

    +
    echo "export SPARK_MASTER_HOST='192.168.0.46'" >> spark-env.sh
    +echo "export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64" >> spark-env.sh
    +
    +
  • +
  • +

    Source the changed environment variables file i.e. spark-env.sh:

    +
    source spark-env.sh
    +
    +
  • +
  • +

    Create a file named slaves in the Spark configuration directory (i.e., + /usr/local/spark/conf/) that specifies all 3 hostnames (nodes) as specified + in /etc/hosts:

    +
    sudo cat slaves
    +master
    +worker1
    +worker2
    +
    +
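    One way to create that file is sketched below (any text editor works just as well). Note that recent Spark releases ship a workers.template and read conf/workers rather than slaves, so if the start scripts do not pick up your slaves file, use the workers filename instead:

    # Write the three hostnames into the configuration file (first tee creates/overwrites, tee -a appends)
    printf "master\nworker1\nworker2\n" | sudo tee /usr/local/spark/conf/slaves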
  • +
+

Run the Spark cluster from the Master VM

+
    +
  • +

    SSH into the master VM again if you are not already logged in.

    +
  • +
  • +

    You need to run the Spark cluster from /usr/local/spark:

    +
    cd /usr/local/spark
    +
    +# Start all hosts (nodes) including master and workers
    +./sbin/start-all.sh
    +
    +
    +

    How to Stop the Spark Cluster

    +

    To stop all of the Spark cluster nodes, execute the ./sbin/stop-all.sh command from /usr/local/spark.

    +
    +
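    To confirm the daemons actually started, one quick check (assuming the JDK's jps utility is on the PATH) is to list the running JVMs; the master VM should show a Master process (plus a Worker, since master is listed in the slaves file), and each worker VM should show a Worker process:

    # Run on the master VM, then on each worker (e.g. ssh worker1 jps)
    jps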
  • +
+

Connect to the Spark WebUI

+

Apache Spark provides a suite of +web user interfaces (WebUIs) +that you can use to monitor the status and resource consumption of your Spark cluster.

+
+

Different types of Spark Web UI

+

Apache Spark provides different web UIs: Master web UI, Worker web UI, +and Application web UI.

+
+
    +
  • +

    You can connect to the Master web UI using + SSH Port Forwarding, aka SSH Tunneling + i.e. Local Port Forwarding from your local machine's terminal by running:

    +
    ssh -N -L <Your_Preferred_Port>:localhost:8080 <User>@<Floating-IP> -i <Path_To_Your_Private_Key>
    +
    +

    Here, you can choose any available port on your machine as <Your_Preferred_Port>, use the master VM's assigned Floating IP as <Floating-IP>, and use the private key associated with the VM as <Path_To_Your_Private_Key>.

    +

    For example:

    +
    ssh -N -L 8080:localhost:8080 ubuntu@199.94.61.4 -i ~/.ssh/cloud.key
    +
    +
  • +
  • +

    Once the SSH Tunneling is successful, please do not close or stop the terminal + where you are running the SSH Tunneling. Instead, log in to the Master web UI + using your web browser: http://localhost:<Your_Preferred_Port> i.e. http://localhost:8080.

    +
  • +
+

The Master web UI offers an overview of the Spark cluster, showcasing the following +details:

+
    +
  • Master URL and REST URL
  • +
  • Available CPUs and memory for the Spark cluster
  • +
  • Status and allocated resources for each worker
  • +
  • Details on active and completed applications, including their status, resources, + and duration
  • +
  • Details on active and completed drivers, including their status and resources
  • +
+

The Master web UI appears as shown below when you navigate to http://localhost:<Your_Preferred_Port> +i.e. http://localhost:8080 from your web browser:

+

The Master web UI

+

The Master web UI also provides an overview of the applications. Through the +Master web UI, you can easily identify the allocated vCPU (Core) and memory +resources for both the Spark cluster and individual applications.

+

Preparing Jobs for Execution and Examination

+
    +
  • +

    To run jobs from /usr/local/spark, execute the following commands:

    +
    cd /usr/local/spark
    +SPARK_VERSION="3.4.2"
    +
    +
    +

    Very Important Note

    +

    Please ensure you are using the same Spark version that you have +downloaded and installed previously as the value +of SPARK_VERSION in the above script.

    +
    +
  • +
  • +

    Single Node Job:

    +

    Let's quickly run a simple job:

    +
    ./bin/spark-submit --driver-memory 2g --class org.apache.spark.examples.SparkPi examples/jars/spark-examples_2.13-$SPARK_VERSION.jar 50
    +
    +
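    If you only care about the result line, you can optionally filter the driver output (a sketch; the SparkPi example prints a line of the form "Pi is roughly ..."):

    ./bin/spark-submit --driver-memory 2g --class org.apache.spark.examples.SparkPi \
        examples/jars/spark-examples_2.13-$SPARK_VERSION.jar 50 2>&1 | grep "Pi is roughly"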
  • +
  • +

    Cluster Mode Job:

    +

    Let's submit a longer and more complex job with many tasks that will be +distributed among the multi-node cluster, and then view the Master web UI:

    +
    ./bin/spark-submit --class org.apache.spark.examples.SparkPi --master spark://master:7077 examples/jars/spark-examples_2.13-$SPARK_VERSION.jar 1000
    +
    +

    While the job is running, you will see a similar view on the Master web UI under +the "Running Applications" section:

    +

    Spark Running Application

    +

    Once the job is completed, it will show up under the "Completed Applications" +section on the Master web UI as shown below:

    +

    Spark Completed Application

    +
  • +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/other-tools/index.html b/other-tools/index.html new file mode 100644 index 00000000..0f66d945 --- /dev/null +++ b/other-tools/index.html @@ -0,0 +1,4623 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Kubernetes

+ +

i. Kubernetes Development environment

+
    +
  1. +

    Minikube

    +
  2. +
  3. +

    Kind

    +
  4. +
  5. +

    MicroK8s

    +
  6. +
  7. +

    K3s

    +

    4.a. K3s with High Availability (HA) setup

    +

    4.b. Multi-master HA K3s cluster using k3sup

    +

    4.c. Single-Node K3s Cluster using k3d

    +

    4.d. Multi-master K3s cluster setup using k3d

    +
  8. +
  9. +

    k0s

    +
  10. +
+

ii. Kubernetes Production environment

+
    +
  1. +

    Kubeadm

    +

    1.a. Bootstrapping cluster with kubeadm

    +

    1.b. Creating a HA cluster with kubeadm

    +
  2. +
  3. +

    Kubespray

    +
  4. +
+
+

CI/CD Tools

+ +

Apache Spark

+ +
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/other-tools/kubernetes/comparisons/index.html b/other-tools/kubernetes/comparisons/index.html new file mode 100644 index 00000000..18b09da3 --- /dev/null +++ b/other-tools/kubernetes/comparisons/index.html @@ -0,0 +1,4514 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Comparison

+

k3s vs microk8s Comparison

+

Kubespray vs Kubeadm

+

Kubeadm provides domain knowledge of Kubernetes clusters' life cycle management, including self-hosted layouts, dynamic discovery services and so on. Had it belonged to the new operators world, it might have been named a "Kubernetes cluster operator". Kubespray, however, does generic configuration management tasks from the "OS operators" Ansible world, plus some initial K8s clustering (with networking plugins included) and control plane bootstrapping.

+

Kubespray has used kubeadm internally for cluster creation since v2.3, in order to consume life cycle management domain knowledge from it and to offload generic OS configuration from it, which hopefully benefits both sides.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/other-tools/kubernetes/images/control_plane_ports_protocols.png b/other-tools/kubernetes/images/control_plane_ports_protocols.png new file mode 100644 index 00000000..9ad79950 Binary files /dev/null and b/other-tools/kubernetes/images/control_plane_ports_protocols.png differ diff --git a/other-tools/kubernetes/images/crc_security_group.png b/other-tools/kubernetes/images/crc_security_group.png new file mode 100644 index 00000000..a262f70c Binary files /dev/null and b/other-tools/kubernetes/images/crc_security_group.png differ diff --git a/other-tools/kubernetes/images/k3d-cluster-info.png b/other-tools/kubernetes/images/k3d-cluster-info.png new file mode 100644 index 00000000..d081ebd8 Binary files /dev/null and b/other-tools/kubernetes/images/k3d-cluster-info.png differ diff --git a/other-tools/kubernetes/images/k3d-cluster-list.png b/other-tools/kubernetes/images/k3d-cluster-list.png new file mode 100644 index 00000000..e76cb3ae Binary files /dev/null and b/other-tools/kubernetes/images/k3d-cluster-list.png differ diff --git a/other-tools/kubernetes/images/k3d-nodes-list.png b/other-tools/kubernetes/images/k3d-nodes-list.png new file mode 100644 index 00000000..4645660c Binary files /dev/null and b/other-tools/kubernetes/images/k3d-nodes-list.png differ diff --git a/other-tools/kubernetes/images/k3d_added_new_node.png b/other-tools/kubernetes/images/k3d_added_new_node.png new file mode 100644 index 00000000..ce4f659c Binary files /dev/null and b/other-tools/kubernetes/images/k3d_added_new_node.png differ diff --git a/other-tools/kubernetes/images/k3d_all.png b/other-tools/kubernetes/images/k3d_all.png new file mode 100644 index 00000000..22170759 Binary files /dev/null and b/other-tools/kubernetes/images/k3d_all.png differ diff --git a/other-tools/kubernetes/images/k3d_ha_all.png b/other-tools/kubernetes/images/k3d_ha_all.png new file mode 100644 index 00000000..cf39f30c Binary files /dev/null and b/other-tools/kubernetes/images/k3d_ha_all.png differ diff --git a/other-tools/kubernetes/images/k3d_ha_nodes.png b/other-tools/kubernetes/images/k3d_ha_nodes.png new file mode 100644 index 00000000..30fae943 Binary files /dev/null and b/other-tools/kubernetes/images/k3d_ha_nodes.png differ diff --git a/other-tools/kubernetes/images/k3d_ha_pods.png b/other-tools/kubernetes/images/k3d_ha_pods.png new file mode 100644 index 00000000..912f3765 Binary files /dev/null and b/other-tools/kubernetes/images/k3d_ha_pods.png differ diff --git a/other-tools/kubernetes/images/k3d_nodes.png b/other-tools/kubernetes/images/k3d_nodes.png new file mode 100644 index 00000000..41dd50cb Binary files /dev/null and b/other-tools/kubernetes/images/k3d_nodes.png differ diff --git a/other-tools/kubernetes/images/k3d_restarted_node.png b/other-tools/kubernetes/images/k3d_restarted_node.png new file mode 100644 index 00000000..0dcc25a4 Binary files /dev/null and b/other-tools/kubernetes/images/k3d_restarted_node.png differ diff --git a/other-tools/kubernetes/images/k3d_self_healing_ha_nodes.png b/other-tools/kubernetes/images/k3d_self_healing_ha_nodes.png new file mode 100644 index 00000000..2d51b04e Binary files /dev/null and b/other-tools/kubernetes/images/k3d_self_healing_ha_nodes.png differ diff --git a/other-tools/kubernetes/images/k3s-vs-microk8s.png b/other-tools/kubernetes/images/k3s-vs-microk8s.png new file mode 100644 index 00000000..c832aedf Binary files /dev/null and b/other-tools/kubernetes/images/k3s-vs-microk8s.png differ diff --git 
a/other-tools/kubernetes/images/k3s_active_agent_status.png b/other-tools/kubernetes/images/k3s_active_agent_status.png new file mode 100644 index 00000000..89966d0d Binary files /dev/null and b/other-tools/kubernetes/images/k3s_active_agent_status.png differ diff --git a/other-tools/kubernetes/images/k3s_active_master_status.png b/other-tools/kubernetes/images/k3s_active_master_status.png new file mode 100644 index 00000000..b9770871 Binary files /dev/null and b/other-tools/kubernetes/images/k3s_active_master_status.png differ diff --git a/other-tools/kubernetes/images/k3s_architecture.png b/other-tools/kubernetes/images/k3s_architecture.png new file mode 100644 index 00000000..4f936bc7 Binary files /dev/null and b/other-tools/kubernetes/images/k3s_architecture.png differ diff --git a/other-tools/kubernetes/images/k3s_ha_architecture.jpg b/other-tools/kubernetes/images/k3s_ha_architecture.jpg new file mode 100644 index 00000000..5cc94c97 Binary files /dev/null and b/other-tools/kubernetes/images/k3s_ha_architecture.jpg differ diff --git a/other-tools/kubernetes/images/k3s_high_availability.png b/other-tools/kubernetes/images/k3s_high_availability.png new file mode 100644 index 00000000..97d79263 Binary files /dev/null and b/other-tools/kubernetes/images/k3s_high_availability.png differ diff --git a/other-tools/kubernetes/images/k3s_security_group.png b/other-tools/kubernetes/images/k3s_security_group.png new file mode 100644 index 00000000..8ff61184 Binary files /dev/null and b/other-tools/kubernetes/images/k3s_security_group.png differ diff --git a/other-tools/kubernetes/images/k3sup.jpg b/other-tools/kubernetes/images/k3sup.jpg new file mode 100644 index 00000000..e3b0a64d Binary files /dev/null and b/other-tools/kubernetes/images/k3sup.jpg differ diff --git a/other-tools/kubernetes/images/k8s-dashboard-docker-app.jpg b/other-tools/kubernetes/images/k8s-dashboard-docker-app.jpg new file mode 100644 index 00000000..db6ce653 Binary files /dev/null and b/other-tools/kubernetes/images/k8s-dashboard-docker-app.jpg differ diff --git a/other-tools/kubernetes/images/k8s-dashboard.jpg b/other-tools/kubernetes/images/k8s-dashboard.jpg new file mode 100644 index 00000000..7b82d05b Binary files /dev/null and b/other-tools/kubernetes/images/k8s-dashboard.jpg differ diff --git a/other-tools/kubernetes/images/k8s_HA_cluster.png b/other-tools/kubernetes/images/k8s_HA_cluster.png new file mode 100644 index 00000000..2e2ce1bf Binary files /dev/null and b/other-tools/kubernetes/images/k8s_HA_cluster.png differ diff --git a/other-tools/kubernetes/images/k8s_components.jpg b/other-tools/kubernetes/images/k8s_components.jpg new file mode 100644 index 00000000..bb010634 Binary files /dev/null and b/other-tools/kubernetes/images/k8s_components.jpg differ diff --git a/other-tools/kubernetes/images/ked-cluster-list.png b/other-tools/kubernetes/images/ked-cluster-list.png new file mode 100644 index 00000000..e76cb3ae Binary files /dev/null and b/other-tools/kubernetes/images/ked-cluster-list.png differ diff --git a/other-tools/kubernetes/images/kubernetes-dashboard-port-type.png b/other-tools/kubernetes/images/kubernetes-dashboard-port-type.png new file mode 100644 index 00000000..7ab83ef2 Binary files /dev/null and b/other-tools/kubernetes/images/kubernetes-dashboard-port-type.png differ diff --git a/other-tools/kubernetes/images/microk8s_dashboard_ports.png b/other-tools/kubernetes/images/microk8s_dashboard_ports.png new file mode 100644 index 00000000..b628bd50 Binary files /dev/null and 
b/other-tools/kubernetes/images/microk8s_dashboard_ports.png differ diff --git a/other-tools/kubernetes/images/microk8s_microbot_app.png b/other-tools/kubernetes/images/microk8s_microbot_app.png new file mode 100644 index 00000000..5efa5848 Binary files /dev/null and b/other-tools/kubernetes/images/microk8s_microbot_app.png differ diff --git a/other-tools/kubernetes/images/minikube_addons.png b/other-tools/kubernetes/images/minikube_addons.png new file mode 100644 index 00000000..dddf37a7 Binary files /dev/null and b/other-tools/kubernetes/images/minikube_addons.png differ diff --git a/other-tools/kubernetes/images/minikube_config.png b/other-tools/kubernetes/images/minikube_config.png new file mode 100644 index 00000000..ad633b5c Binary files /dev/null and b/other-tools/kubernetes/images/minikube_config.png differ diff --git a/other-tools/kubernetes/images/minikube_dashboard_clusterip.png b/other-tools/kubernetes/images/minikube_dashboard_clusterip.png new file mode 100644 index 00000000..5469ca8e Binary files /dev/null and b/other-tools/kubernetes/images/minikube_dashboard_clusterip.png differ diff --git a/other-tools/kubernetes/images/minikube_dashboard_nodeport.png b/other-tools/kubernetes/images/minikube_dashboard_nodeport.png new file mode 100644 index 00000000..bd1b3f4f Binary files /dev/null and b/other-tools/kubernetes/images/minikube_dashboard_nodeport.png differ diff --git a/other-tools/kubernetes/images/minikube_hello-minikube_page.png b/other-tools/kubernetes/images/minikube_hello-minikube_page.png new file mode 100644 index 00000000..7022d94e Binary files /dev/null and b/other-tools/kubernetes/images/minikube_hello-minikube_page.png differ diff --git a/other-tools/kubernetes/images/minikube_nginx_page.png b/other-tools/kubernetes/images/minikube_nginx_page.png new file mode 100644 index 00000000..4b1dc309 Binary files /dev/null and b/other-tools/kubernetes/images/minikube_nginx_page.png differ diff --git a/other-tools/kubernetes/images/minikube_started.png b/other-tools/kubernetes/images/minikube_started.png new file mode 100644 index 00000000..2949a34b Binary files /dev/null and b/other-tools/kubernetes/images/minikube_started.png differ diff --git a/other-tools/kubernetes/images/module_01.svg b/other-tools/kubernetes/images/module_01.svg new file mode 100644 index 00000000..ec0e55f1 --- /dev/null +++ b/other-tools/kubernetes/images/module_01.svg @@ -0,0 +1 @@ +16.07.28_k8s_visual_diagrams diff --git a/other-tools/kubernetes/images/module_02.svg b/other-tools/kubernetes/images/module_02.svg new file mode 100644 index 00000000..d4106ec1 --- /dev/null +++ b/other-tools/kubernetes/images/module_02.svg @@ -0,0 +1 @@ +16.07.28_k8s_visual_diagrams diff --git a/other-tools/kubernetes/images/module_03.svg b/other-tools/kubernetes/images/module_03.svg new file mode 100644 index 00000000..1ecb989c --- /dev/null +++ b/other-tools/kubernetes/images/module_03.svg @@ -0,0 +1 @@ +16.07.28_k8s_visual_diagrams diff --git a/other-tools/kubernetes/images/module_04.svg b/other-tools/kubernetes/images/module_04.svg new file mode 100644 index 00000000..63ad8e47 --- /dev/null +++ b/other-tools/kubernetes/images/module_04.svg @@ -0,0 +1 @@ +16.07.28_k8s_visual_diagrams diff --git a/other-tools/kubernetes/images/module_05.svg b/other-tools/kubernetes/images/module_05.svg new file mode 100644 index 00000000..382a6c27 --- /dev/null +++ b/other-tools/kubernetes/images/module_05.svg @@ -0,0 +1 @@ +16.07.28_k8s_visual_diagrams diff --git a/other-tools/kubernetes/images/module_06.svg 
b/other-tools/kubernetes/images/module_06.svg new file mode 100644 index 00000000..97c73217 --- /dev/null +++ b/other-tools/kubernetes/images/module_06.svg @@ -0,0 +1 @@ +16.07.28_k8s_visual_diagrams diff --git a/other-tools/kubernetes/images/network-layout.png b/other-tools/kubernetes/images/network-layout.png new file mode 100644 index 00000000..404b58a4 Binary files /dev/null and b/other-tools/kubernetes/images/network-layout.png differ diff --git a/other-tools/kubernetes/images/nginx-pod-worker-node.png b/other-tools/kubernetes/images/nginx-pod-worker-node.png new file mode 100644 index 00000000..b16caba3 Binary files /dev/null and b/other-tools/kubernetes/images/nginx-pod-worker-node.png differ diff --git a/other-tools/kubernetes/images/nginx_page.png b/other-tools/kubernetes/images/nginx_page.png new file mode 100644 index 00000000..53c3f3da Binary files /dev/null and b/other-tools/kubernetes/images/nginx_page.png differ diff --git a/other-tools/kubernetes/images/okd_architecture.png b/other-tools/kubernetes/images/okd_architecture.png new file mode 100644 index 00000000..55f1e2ce Binary files /dev/null and b/other-tools/kubernetes/images/okd_architecture.png differ diff --git a/other-tools/kubernetes/images/running-nginx-container-app.jpg b/other-tools/kubernetes/images/running-nginx-container-app.jpg new file mode 100644 index 00000000..682e62a0 Binary files /dev/null and b/other-tools/kubernetes/images/running-nginx-container-app.jpg differ diff --git a/other-tools/kubernetes/images/running_minikube_services.png b/other-tools/kubernetes/images/running_minikube_services.png new file mode 100644 index 00000000..20d4f719 Binary files /dev/null and b/other-tools/kubernetes/images/running_minikube_services.png differ diff --git a/other-tools/kubernetes/images/running_pods.png b/other-tools/kubernetes/images/running_pods.png new file mode 100644 index 00000000..b898dbb2 Binary files /dev/null and b/other-tools/kubernetes/images/running_pods.png differ diff --git a/other-tools/kubernetes/images/running_services.png b/other-tools/kubernetes/images/running_services.png new file mode 100644 index 00000000..0c810b89 Binary files /dev/null and b/other-tools/kubernetes/images/running_services.png differ diff --git a/other-tools/kubernetes/images/single_master_architecture.png b/other-tools/kubernetes/images/single_master_architecture.png new file mode 100644 index 00000000..bb7068c0 Binary files /dev/null and b/other-tools/kubernetes/images/single_master_architecture.png differ diff --git a/other-tools/kubernetes/images/skooner-dashboard.png b/other-tools/kubernetes/images/skooner-dashboard.png new file mode 100644 index 00000000..b5d7b152 Binary files /dev/null and b/other-tools/kubernetes/images/skooner-dashboard.png differ diff --git a/other-tools/kubernetes/images/skooner-pod-worker-node.png b/other-tools/kubernetes/images/skooner-pod-worker-node.png new file mode 100644 index 00000000..e2be83d6 Binary files /dev/null and b/other-tools/kubernetes/images/skooner-pod-worker-node.png differ diff --git a/other-tools/kubernetes/images/skooner_port.png b/other-tools/kubernetes/images/skooner_port.png new file mode 100644 index 00000000..f3dec559 Binary files /dev/null and b/other-tools/kubernetes/images/skooner_port.png differ diff --git a/other-tools/kubernetes/images/the_k8s_dashboard.png b/other-tools/kubernetes/images/the_k8s_dashboard.png new file mode 100644 index 00000000..265e3efb Binary files /dev/null and b/other-tools/kubernetes/images/the_k8s_dashboard.png differ diff --git 
a/other-tools/kubernetes/images/worker_nodes_ports_protocols.png b/other-tools/kubernetes/images/worker_nodes_ports_protocols.png new file mode 100644 index 00000000..c4c98443 Binary files /dev/null and b/other-tools/kubernetes/images/worker_nodes_ports_protocols.png differ diff --git a/other-tools/kubernetes/k0s/index.html b/other-tools/kubernetes/k0s/index.html new file mode 100644 index 00000000..6ef309fd --- /dev/null +++ b/other-tools/kubernetes/k0s/index.html @@ -0,0 +1,4669 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

k0s

+

Key Features

+
    +
  • Available as a single static binary
  • +
  • Offers a self-hosted, isolated control plane
  • +
  • Supports a variety of storage backends, including etcd, SQLite, MySQL (or any + compatible), and PostgreSQL.
  • +
  • Offers an Elastic control plane
  • +
  • Vanilla upstream Kubernetes
  • +
  • Supports custom container runtimes (containerd is the default)
  • +
  • Supports custom Container Network Interface (CNI) plugins (calico is the default)
  • +
  • Supports x86_64 and arm64
  • +
+

Pre-requisite

+

We will need 1 VM to create a single-node Kubernetes cluster using k0s. We are using the following settings for this purpose:

+
    +
  • +

    1 Linux machine, ubuntu-22.04-x86_64 or your choice of Ubuntu OS image, + cpu-su.2 flavor with 2vCPU, 8GB RAM, 20GB storage - also assign Floating IP + to this VM.

    +
  • +
  • +

    Set up a unique hostname on the machine using the following commands:

    +
    echo "<node_internal_IP> <host_name>" >> /etc/hosts
    +hostnamectl set-hostname <host_name>
    +
    +

    For example:

    +
    echo "192.168.0.252 k0s" >> /etc/hosts
    +hostnamectl set-hostname k0s
    +
    +
  • +
+

Install k0s on Ubuntu

+

Run the below command on the Ubuntu VM:

+
    +
  • +

    SSH into k0s machine

    +
  • +
  • +

    Switch to root user: sudo su

    +
  • +
  • +

    Update the repositories and packages:

    +
    apt-get update && apt-get upgrade -y
    +
    +
  • +
  • +

    Download k0s:

    +
    curl -sSLf https://get.k0s.sh | sudo sh
    +
    +
  • +
  • +

    Install k0s as a service:

    +
    k0s install controller --single
    +
    +INFO[2021-10-12 01:45:52] no config file given, using defaults
    +INFO[2021-10-12 01:45:52] creating user: etcd
    +INFO[2021-10-12 01:46:00] creating user: kube-apiserver
    +INFO[2021-10-12 01:46:00] creating user: konnectivity-server
    +INFO[2021-10-12 01:46:00] creating user: kube-scheduler
    +INFO[2021-10-12 01:46:01] Installing k0s service
    +
    +
  • +
  • +

    Start k0s as a service:

    +
    k0s start
    +
    +
  • +
  • +

    Check service, logs and k0s status:

    +
    k0s status
    +
    +Version: v1.22.2+k0s.1
    +Process ID: 16625
    +Role: controller
    +Workloads: true
    +
    +
  • +
  • +

    Access your cluster using kubectl:

    +
    k0s kubectl get nodes
    +
    +NAME   STATUS   ROLES    AGE    VERSION
    +k0s    Ready    <none>   8m3s   v1.22.2+k0s
    +
    +
    alias kubectl='k0s kubectl'
    +kubectl get nodes -o wide
    +
    +
    kubectl get all
    +NAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
    +service/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   38s
    +
    +
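    Optionally, you can run a small test workload to confirm scheduling works (a sketch using standard kubectl commands; nginx-test is just an example name):

    kubectl create deployment nginx-test --image=nginx
    kubectl get pods -o wide
    # Clean up the test resources when done
    kubectl delete deployment nginx-test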
  • +
+

Uninstall k0s

+
    +
  • +

    Stop the service:

    +
    sudo k0s stop
    +
    +
  • +
  • +

    Execute the k0s reset command, which cleans up the installed system service, data directories, containers, mounts and network namespaces.

    +
    sudo k0s reset
    +
    +
  • +
  • +

    Reboot the system

    +
  • +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/other-tools/kubernetes/k3s/k3s-ha-cluster-using-k3d/index.html b/other-tools/kubernetes/k3s/k3s-ha-cluster-using-k3d/index.html new file mode 100644 index 00000000..db09f9f9 --- /dev/null +++ b/other-tools/kubernetes/k3s/k3s-ha-cluster-using-k3d/index.html @@ -0,0 +1,4682 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Set up K3s in High Availability using k3d

+

First, Kubernetes HA has two possible setups: embedded or external database (DB). We'll use the embedded DB in this HA K3s cluster setup, for which etcd is the default.

+

There are some strongly recommended Kubernetes HA best practices +and also there is Automated HA master deployment doc.

+

Pre-requisite

+

Make sure you have already installed k3d following this.

+

HA cluster with at least three control plane nodes

+
k3d cluster create --servers 3 --image rancher/k3s:latest
+
+

Here, --servers 3 requests three nodes to be created with the role server, and --image rancher/k3s:latest specifies the K3s image to be used; here we are using latest.

+
    +
  • +

    Switch context to the new cluster:

    +
    kubectl config use-context k3d-k3s-default
    +
    +

    You can now check what has been created from the different points of view:

    +
    kubectl get nodes --output wide
    +
    +

    The output will look like:

    +

    k3d HA nodes

    +
    kubectl get pods --all-namespaces --output wide
    +
    +

    OR,

    +
    kubectl get pods -A -o wide
    +
    +

    The output will look like: +k3d HA pods

    +
  • +
+

Scale up the cluster

+

You can quickly simulate the addition of another control plane node to the HA cluster:

+
k3d node create extraCPnode --role=server --image=rancher/k3s:latest
+
+INFO[0000] Adding 1 node(s) to the runtime local cluster 'k3s-default'...
+INFO[0000] Starting Node 'k3d-extraCPnode-0'
+INFO[0018] Updating loadbalancer config to include new server node(s)
+INFO[0018] Successfully configured loadbalancer k3d-k3s-default-serverlb!
+INFO[0019] Successfully created 1 node(s)!
+
+

Here, extraCPnode specifies the name for the node, --role=server sets the role of the node to be a control plane/server, and --image rancher/k3s:latest specifies the K3s image to be used; here we are using latest.

+
kubectl get nodes
+
+NAME                       STATUS   ROLES         AGE   VERSION
+k3d-extracpnode-0          Ready    etcd,master   31m   v1.19.3+k3s2
+k3d-k3s-default-server-0   Ready    etcd,master   47m   v1.19.3+k3s2
+k3d-k3s-default-server-1   Ready    etcd,master   47m   v1.19.3+k3s2
+k3d-k3s-default-server-2   Ready    etcd,master   47m   v1.19.3+k3s2
+
+

OR,

+
kubectl get nodes --output wide
+
+

The output looks like below:

+

k3d added new node

+

Heavy Armored against crashes

+

As we are working with containers, the best way to "crash" a node is to literally +stop the container:

+
docker stop k3d-k3s-default-server-0
+
+
+

Note

+

The Docker and k3d commands will show the state change immediately. However, +the Kubernetes (read: K8s or K3s) cluster needs a short time to see the state +change to NotReady.

+
+
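If you want to observe the transition in real time, one option is to keep a watch running in a second terminal:

kubectl get nodes --watch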
kubectl get nodes
+
+NAME                       STATUS     ROLES         AGE   VERSION
+k3d-extracpnode-0          Ready      etcd,master   32m   v1.19.3+k3s2
+k3d-k3s-default-server-0   NotReady   etcd,master   48m   v1.19.3+k3s2
+k3d-k3s-default-server-1   Ready      etcd,master   48m   v1.19.3+k3s2
+k3d-k3s-default-server-2   Ready      etcd,master   48m   v1.19.3+k3s2
+
+

Now is a good time to look again at the load balancer k3d uses and how it is critical in allowing us to continue accessing the K3s cluster.

+

While the load balancer internally switched to the next available node, from an external connectivity point of view we still use the same IP/host. This abstraction saves us quite some effort, and it's one of the most useful features of k3d.

+

Let’s look at the state of the cluster:

+
kubectl get all --all-namespaces
+
+

The output looks like below:

+

k3d HA all

+

Everything looks right. If we look at the pods more specifically, then we will +see that K3s automatically self-healed by recreating pods running on the failed +node on other nodes:

+
kubectl get pods --all-namespaces --output wide
+
+

As the output can be seen:

+

k3d self healing HA nodes

+

Finally, to show the power of HA and how K3s manages it, let's restart node 0 and see it being re-included into the cluster as if nothing happened:

+
docker start k3d-k3s-default-server-0
+
+

Our cluster is stable, and all the nodes are fully operational again as shown below: +k3d restarted node

+

Cleaning the resources

+
k3d cluster delete
+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/other-tools/kubernetes/k3s/k3s-ha-cluster/index.html b/other-tools/kubernetes/k3s/k3s-ha-cluster/index.html new file mode 100644 index 00000000..e057cf12 --- /dev/null +++ b/other-tools/kubernetes/k3s/k3s-ha-cluster/index.html @@ -0,0 +1,4898 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

K3s with High Availability setup

+

First, Kubernetes HA has two possible setups: embedded or external database +(DB). We’ll use the external DB in this HA K3s cluster setup. For which MySQL +is the external DB as shown here: +k3s HA architecture with external database

+

In the diagram above, both the user running kubectl and each of the two agents connect to the TCP Load Balancer. The Load Balancer uses a list of private IP addresses to balance the traffic between the three servers. If one of the servers crashes, it is removed from the list of IP addresses.

+

The servers use the SQL data store to synchronize the cluster’s state.

+

Requirements

+

i. Managed TCP Load Balancer

+

ii. Managed MySQL service

+

iii. Three VMs to run as K3s servers

+

iv. Two VMs to run as K3s agents

+

There are some strongly recommended Kubernetes HA best practices +and also there is Automated HA master deployment doc.

+

Managed TCP Load Balancer

+

Create a load balancer using nginx: the nginx.conf file located at /etc/nginx/nginx.conf contains an upstream block pointing to the 3 K3s servers on port 6443, as shown below:

+
events {}
+...
+
+stream {
+  upstream k3s_servers {
+    server <k3s_server1-Internal-IP>:6443;
+    server <k3s_server2-Internal-IP>:6443;
+    server <k3s_server3-Internal-IP>:6443;
+  }
+
+  server {
+    listen 6443;
+    proxy_pass k3s_servers;
+  }
+}
+
+
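A minimal sketch of installing nginx and activating that configuration on the load balancer VM (assumes an Ubuntu host and that the stream block above has been added to /etc/nginx/nginx.conf):

sudo apt-get update && sudo apt-get install -y nginx
# Validate the configuration, then restart nginx so it listens on port 6443
sudo nginx -t
sudo systemctl restart nginx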

Managed MySQL service

+

Create a MySQL database server with a new database, and create a new MySQL user and password with permission to read and write that database. In this example, you can create:

+

database name: <YOUR_DB_NAME> +database user: <YOUR_DB_USER_NAME> +database password: <YOUR_DB_USER_PASSWORD>

+
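A minimal sketch of creating that database and user on the MySQL server (placeholders follow the names above; tighten the allowed host pattern '%' to your internal network as appropriate):

sudo mysql <<'SQL'
CREATE DATABASE <YOUR_DB_NAME>;
CREATE USER '<YOUR_DB_USER_NAME>'@'%' IDENTIFIED BY '<YOUR_DB_USER_PASSWORD>';
GRANT ALL PRIVILEGES ON <YOUR_DB_NAME>.* TO '<YOUR_DB_USER_NAME>'@'%';
FLUSH PRIVILEGES;
SQL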

Three VMs to run as K3s servers

+

Create 3 K3s Master VMs and perform the following steps on each of them:

+

i. Export the datastore endpoint:

+
export K3S_DATASTORE_ENDPOINT='mysql://<YOUR_DB_USER_NAME>:<YOUR_DB_USER_PASSWORD>@tcp(<MySQL-Server-Internal-IP>:3306)/<YOUR_DB_NAME>'
+
+

ii. Install K3s with a node taint so that no pods are scheduled on this server unless they are critical add-ons (the opposite of affinity), and with tls-san setting <Loadbalancer-Internal-IP> as an alternative name for the TLS certificate.

+
curl -sfL https://get.k3s.io | sh -s - server \
+    --node-taint CriticalAddonsOnly=true:NoExecute \
+    --tls-san <Loadbalancer-Internal-IP_or_Hostname>
+
+
    +
  • +

    Verify all master nodes are visible to one another:

    +
    sudo k3s kubectl get node
    +
    +
  • +
  • +

    Generate a token from one of the K3s Master VMs: you need to extract a token from the master that will be used to join the nodes to the control plane, by running the following command on one of the K3s master nodes:

    +
    sudo cat /var/lib/rancher/k3s/server/node-token
    +
    +

    You will then obtain a token that looks like:

    +
    K1097aace305b0c1077fc854547f34a598d23330ff047ddeed8beb3c428b38a1ca7::server:6cc9fbb6c5c9de96f37fb14b5535c778
    +
    +
  • +
+

Two VMs to run as K3s agents

+

Set the K3S_URL to point to the Loadbalancer’s internal IP and set the K3S_TOKEN +from the clipboard on both of the agent nodes:

+
curl -sfL https://get.k3s.io | K3S_URL=https://<Loadbalancer-Internal-IP_or_Hostname>:6443 \
+    K3S_TOKEN=<Token_From_Master> sh -
+
+

Once both Agents are running, if you run the following command on Master Server, +you can see all nodes:

+
sudo k3s kubectl get node
+
+

Simulate a failure

+

To simulate a failure, stop the K3s service on one or more of the K3s servers manually, +then run the kubectl get nodes command:

+
sudo systemctl stop k3s
+
+

The remaining server(s) will take over at this point.

+
    +
  • +

    To restart servers manually:

    +
    sudo systemctl restart k3s
    +
    +
  • +
+

On your local development machine to access Kubernetes Cluster Remotely (Optional)

+
+

Important Requirement

+

Your local development machine must have installed kubectl.

+
+
    +
  • +

    Copy the Kubernetes config to your local machine: copy the content of the kubeconfig file located on the K3s master node at /etc/rancher/k3s/k3s.yaml to your local machine's ~/.kube/config file, as sketched below. Before saving, change the cluster server address from 127.0.0.1 to <Loadbalancer-Internal-IP>. This will allow your local machine to see the cluster nodes:
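    A minimal sketch of one way to do that copy (it assumes you have SSH access to a master node as root and kubectl installed locally):

    scp root@<Master-Internal-IP>:/etc/rancher/k3s/k3s.yaml ~/.kube/config
    # Point kubectl at the load balancer instead of the local loopback address
    sed -i 's/127.0.0.1/<Loadbalancer-Internal-IP>/' ~/.kube/config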

    +
    kubectl get nodes
    +
    +
  • +
+

Kubernetes Dashboard

+

The Kubernetes Dashboard is a GUI tool to help you work more efficiently with your K8s cluster. It is only accessible from within the cluster (at least not without some serious tweaking).

+

Check the releases page for the command to use for installation:

+
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml
+
+
    +
  • +

    Dashboard RBAC Configuration:

    +

    dashboard.admin-user.yml

    +
    apiVersion: v1
    +kind: ServiceAccount
    +metadata:
    +  name: admin-user
    +  namespace: kubernetes-dashboard
    +
    +

    dashboard.admin-user-role.yml

    +
    apiVersion: rbac.authorization.k8s.io/v1
    +kind: ClusterRoleBinding
    +metadata:
    +  name: admin-user
    +roleRef:
    +  apiGroup: rbac.authorization.k8s.io
    +  kind: ClusterRole
    +  name: cluster-admin
    +subjects:
    +- kind: ServiceAccount
    +  name: admin-user
    +  namespace: kubernetes-dashboard
    +
    +
  • +
  • +

    Deploy the admin-user configuration:

    +
    sudo k3s kubectl create -f dashboard.admin-user.yml -f dashboard.admin-user-role.yml
    +
    +
    +

    Important Note

    +

    If you're doing this from your local development machine, remove sudo k3s +and just use kubectl)

    +
    +
  • +
  • +

    Get bearer token

    +
    sudo k3s kubectl -n kubernetes-dashboard describe secret admin-user-token \
    +  | grep ^token
    +
    +
  • +
  • +

    Start dashboard locally:

    +
    sudo k3s kubectl proxy
    +
    +

    Then you can sign in at this URL using your token we got in the previous step:

    +
    http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/
    +
    +
  • +
+

Deploying Nginx using deployment

+
    +
  • +

    Create a deployment nginx.yaml:

    +
    vi nginx.yaml
    +
    +
  • +
  • +

    Copy and paste the following content in nginx.yaml:

    +
    apiVersion: apps/v1
    +kind: Deployment
    +metadata:
    +  name: mysite
    +  labels:
    +    app: mysite
    +spec:
    +  replicas: 1
    +  selector:
    +    matchLabels:
    +      app: mysite
    +  template:
    +    metadata:
    +      labels:
    +        app : mysite
    +    spec:
    +      containers:
    +        - name : mysite
    +          image: nginx
    +          ports:
    +            - containerPort: 80
    +
    +
    sudo k3s kubectl apply -f nginx.yaml
    +
    +
  • +
  • +

    Verify the nginx pod is in Running state:

    +
    sudo k3s kubectl get pods --all-namespaces
    +
    +

    OR,

    +
    kubectl get pods --all-namespaces --output wide
    +
    +

    OR,

    +
    kubectl get pods -A -o wide
    +
    +
  • +
  • +

    Scale the pods to available agents:

    +
    sudo k3s kubectl scale --replicas=2 deploy/mysite
    +
    +
  • +
  • +

    View all deployment status:

    +
    sudo k3s kubectl get deploy mysite
    +
    +NAME     READY   UP-TO-DATE   AVAILABLE   AGE
    +mysite   2/2     2            2           85s
    +
    +
  • +
  • +

    Delete the nginx deployment and pod:

    +
    sudo k3s kubectl delete -f nginx.yaml
    +
    +

    OR,

    +
    sudo k3s kubectl delete deploy mysite
    +
    +
  • +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/other-tools/kubernetes/k3s/k3s-using-k3d/index.html b/other-tools/kubernetes/k3s/k3s-using-k3d/index.html new file mode 100644 index 00000000..6906580c --- /dev/null +++ b/other-tools/kubernetes/k3s/k3s-using-k3d/index.html @@ -0,0 +1,4813 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Setup K3s cluster Using k3d

+

One of the most popular methods, and the second method covered here, of creating a K3s cluster is k3d. As the name suggests, k3d (K3s-in-docker) is a wrapper around K3s – Lightweight Kubernetes – that runs it in docker. Please refer to this link to get a brief insight into this wonderful tool. It provides a seamless experience for K3s cluster management with some straightforward commands, and is efficient enough to create and manage single-node as well as High Availability K3s clusters with just a few commands.

+
+

Note

+

To use k3d you must have docker installed on your system.

+
+
+

Install Docker

+
    +
  • +

    Install container runtime - docker

    +
    apt-get install docker.io -y
    +
    +
  • +
  • +

    Configure the Docker daemon, in particular to use systemd for the management + of the container’s cgroups

    +
    cat <<EOF | sudo tee /etc/docker/daemon.json
    +{
    +"exec-opts": ["native.cgroupdriver=systemd"]
    +}
    +EOF
    +
    +systemctl enable --now docker
    +usermod -aG docker ubuntu
    +systemctl daemon-reload
    +systemctl restart docker
    +
    +
  • +
+
+

Install kubectl

+
    +
  • +

    Install kubectl binary

    +

    kubectl: the command line util to talk to your cluster.

    +
    snap install kubectl --classic
    +
    +

    This outputs:

    +
    kubectl 1.26.1 from Canonical✓ installed
    +
    +
  • +
  • +

    Now verify the kubectl version:

    +
    kubectl version -o yaml
    +
    +
  • +
+
+

Installing k3d

+

k3d Installation

+

The command below will install k3d on your system using the installation script.

+
wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash
+
+

OR,

+
curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash
+
+

To verify the installation, please run the following command:

+
k3d version
+
+k3d version v5.0.0
+k3s version v1.21.5-k3s1 (default)
+
+

After the successful installation, you are ready to create your cluster using +k3d and run K3s in docker within seconds.

+

Getting Started

+

Now let's directly jump into creating our K3s cluster using k3d.

+
    +
  1. +

    Create k3d Cluster:

    +
    k3d cluster create k3d-demo-cluster
    +
    +

    This single command spawns a K3s cluster with two containers: a Kubernetes control-plane node (server) and a load balancer (serverlb) in front of it. It puts both of them in a dedicated Docker network and exposes the Kubernetes API on a randomly chosen free port on the Docker host. It also creates a named Docker volume in the background as a preparation for image imports.

    +

    You can also look for advance syntax for cluster creation:

    +
    k3d cluster create mycluster --api-port 127.0.0.1:6445 --servers 3 \
    +    --agents 2 --volume '/home/me/mycode:/code@agent[*]' --port '8080:80@loadbalancer'
    +
    +

    Here, the above single command spawns a K3s cluster with six containers:

    +
      +
    • +

      load balancer

      +
    • +
    • +

      3 servers (control-plane nodes)

      +
    • +
    • +

      2 agents (formerly worker nodes)

      +
    • +
    +

    With the --api-port 127.0.0.1:6445, you tell k3d to map the Kubernetes +API Port (6443 internally) to 127.0.0.1/localhost’s port 6445. That +means that you will have this connection string in your Kubeconfig: +server: https://127.0.0.1:6445 to connect to this cluster.

    +

    This port will be mapped from the load balancer to your host system. From +there, requests will be proxied to your server nodes, effectively simulating +a production setup, where server nodes also can go down and you would want +to failover to another server.

    +

    The --volume /home/me/mycode:/code@agent[*] bind mounts your local directory /home/me/mycode to the path /code inside all ([*]) of your agent nodes. Replace * with an index (here: 0 or 1) to only mount it into one of them.

    +

    The specification telling k3d which nodes it should mount the volume to +is called "node filter" and it’s also used for other flags, like the --port +flag for port mappings.

    +

    That said, --port '8080:80@loadbalancer' maps your local host’s port 8080 +to port 80 on the load balancer (serverlb), which can be used to forward +HTTP ingress traffic to your cluster. For example, you can now deploy a +web app into the cluster (Deployment), which is exposed (Service) externally +via an Ingress such as myapp.k3d.localhost.

    +

    Then (provided that everything is set up to resolve that domain to your +localhost IP), you can point your browser to http://myapp.k3d.localhost:8080 +to access your app. Traffic then flows from your host through the Docker +bridge interface to the load balancer. From there, it’s proxied to the +cluster, where it passes via Ingress and Service to your application Pod.

    +
    +

    Note

    +

    You have to have some mechanism set up to route to resolve myapp.k3d.localhost +to your local host IP (127.0.0.1). The most common way is using entries +of the form 127.0.0.1 myapp.k3d.localhost in your /etc/hosts file +(C:\Windows\System32\drivers\etc\hosts on Windows). However, this does +not allow for wildcard entries (*.localhost), so it may become a bit +cumbersome after a while, so you may want to have a look at tools like +dnsmasq (MacOS/UNIX) or Acrylic (Windows) to ease the burden.

    +
    +
  2. +
  3. +

    Getting the cluster’s kubeconfig: + Get the new cluster’s connection details merged into your default kubeconfig + (usually specified using the KUBECONFIG environment variable or the default + path $HOME/.kube/config) and directly switch to the new context:

    +
    k3d kubeconfig merge k3d-demo-cluster --kubeconfig-switch-context
    +
    +

    This outputs:

    +
    /root/.k3d/kubeconfig-k3d-demo-cluster.yaml
    +
    +
  4. +
  5. +

    Checking the nodes running on k3d cluster:

    +
    k3d node list
    +
    +

    k3d nodes list

    +

    You can see here two nodes. The (very) smart implementation here is that +while the cluster is running on its node k3d-k3s-default-server-0, +there is another "node" that acts as the load balancer i.e. k3d-k3d-demo-cluster-serverlb.

    +
  6. +
  7. +

    Fire kubectl commands, which allow you to run commands against the Kubernetes cluster:

    +

    i. The below command will list the nodes available in our cluster:

    +
    kubectl get nodes -o wide
    +
    +

    OR,

    +
    kubectl get nodes --output wide
    +
    +

    The output will look like:

    +

    k3d nodes list

    +

    ii. To look at what’s inside the K3s cluster (pods, services, deployments, +etc.):

    +
    kubectl get all --all-namespaces
    +
    +

    The output will look like:

    +

    k3d all

    +

    We can see that, in addition to the Kubernetes service, K3s deploys DNS, +metrics and ingress (traefik) services when you use the defaults.

    +

    iii. List the active k3d clusters:

    +
    k3d cluster list
    +
    +

    k3d cluster list

    +

    iv. Check the cluster connectivity:

    +
    kubectl cluster-info
    +
    +

    kubectl cluster-info

    +

    To further debug and diagnose cluster problems, use 'kubectl cluster-info +dump'.

    +
  8. +
  9. +

    Check the active containers:

    +
    docker ps
    +
    +
  10. +
+

Now, as you can observe, the cluster is up and running and we can play around with it; you can create and deploy your applications on the cluster, for example:

+
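A quick smoke test (a sketch; nginx is used only as a convenient test image):

kubectl create deployment nginx-test --image=nginx
kubectl get pods -o wide
# Remove the test deployment when finished
kubectl delete deployment nginx-test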

Deleting Cluster

+
k3d cluster delete k3d-demo-cluster
+
+INFO[0000] Deleting cluster 'k3d-demo-cluster'
+INFO[0000] Deleted k3d-k3d-demo-cluster-serverlb
+INFO[0001] Deleted k3d-k3d-demo-cluster-server-0
+INFO[0001] Deleting cluster network 'k3d-k3d-demo-cluster'
+INFO[0001] Deleting image volume 'k3d-k3d-demo-cluster-images'
+INFO[0001] Removing cluster details from default kubeconfig...
+INFO[0001] Removing standalone kubeconfig file (if there is one)...
+INFO[0001] Successfully deleted cluster k3d-demo-cluster!
+
+

You can also create a k3d High Availability cluster +and add as many nodes you want within seconds.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/other-tools/kubernetes/k3s/k3s-using-k3sup/index.html b/other-tools/kubernetes/k3s/k3s-using-k3sup/index.html new file mode 100644 index 00000000..cf21d2fe --- /dev/null +++ b/other-tools/kubernetes/k3s/k3s-using-k3sup/index.html @@ -0,0 +1,4635 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

K3s cluster setup using k3sup

+

k3sup (pronounced ketchup) is a popular open source tool to install K3s over +SSH.

+
    +
  • Bootstrap the cluster + k3sup Setup
  • +
+

The two most important commands in k3sup are:

+

i. install: install K3s to a new server and create a join token for the cluster

+

ii. join: fetch the join token from a server, then use it to install K3s to an +agent

+

Download k3sup

+
curl -sLS https://get.k3sup.dev | sh
+sudo install k3sup /usr/bin/
+
+k3sup --help
+
+
    +
  • +

    Other options for install:

    +

    --cluster - start this server in clustering mode using embedded etcd (embedded +HA)

    +

    --skip-install - if you already have k3s installed, you can just run this command +to get the kubeconfig

    +

    --ssh-key - specify a specific path for the SSH key for remote login

    +

    --local-path - default is ./kubeconfig - set the file where you want to save +your cluster's kubeconfig. By default this file will be overwritten.

    +

    --merge - Merge config into existing file instead of overwriting (e.g. to add +config to the default kubectl config, use --local-path ~/.kube/config --merge).

    +

    --context - default is default - set the name of the kubeconfig context.

    +

    --ssh-port - default is 22, but you can specify an alternative port i.e. 2222

    +

    --k3s-extra-args - Optional extra arguments to pass to the k3s installer, wrapped in quotes, i.e. --k3s-extra-args '--no-deploy traefik' or --k3s-extra-args '--docker'. For multiple args, combine them within single quotes, i.e. --k3s-extra-args '--no-deploy traefik --docker'.

    +

    --k3s-version - set the specific version of k3s, i.e. v0.9.1

    +

    --ipsec - Enforces the optional extra argument for k3s: --flannel-backend +option: ipsec

    +

    --print-command - Prints out the command, sent over SSH to the remote computer

    +

    --datastore - used to pass a SQL connection-string to the --datastore-endpoint +flag of k3s.

    +

    See even more install options by running k3sup install --help.

    +
  • +
  • +

    On Master Node:

    +
    export SERVER_IP=<Master-Internal-IP>
    +export USER=root
    +
    +k3sup install --ip $SERVER_IP --user $USER
    +
    +
  • +
  • +

    On Agent Node:

    +

    Next join one or more agents to the cluster:

    +
    export AGENT_IP=<Agent-Internal-IP>
    +
    +export SERVER_IP=<Master-Internal-IP>
    +export USER=root
    +
    +k3sup join --ip $AGENT_IP --server-ip $SERVER_IP --user $USER
    +
    +
  • +
+

Create a multi-master (HA) setup with external SQL

+
export LB_IP='<Loadbalancer-Internal-IP_or_Hostname>'
+export DATASTORE='mysql://<YOUR_DB_USER_NAME>:<YOUR_DB_USER_PASSWORD>@tcp(<MySQL-Server-Internal-IP>:3306)/<YOUR_DB_NAME>'
+export CHANNEL=latest
+
+

Before continuing, check that your environment variables are still populated from +earlier, and if not, trace back and populate them.

+
echo $LB_IP
+echo $DATASTORE
+echo $CHANNEL
+
+
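The commands below also reference $SERVER1, $SERVER2, $SERVER3, $AGENT1 and $AGENT2, which are not exported above; they are assumed to hold the internal IPs of your three server VMs and two agent VMs, for example:

export SERVER1=<k3s_server1-Internal-IP>
export SERVER2=<k3s_server2-Internal-IP>
export SERVER3=<k3s_server3-Internal-IP>
export AGENT1=<k3s_agent1-Internal-IP>
export AGENT2=<k3s_agent2-Internal-IP>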
k3sup install --user root --ip $SERVER1 \
+--k3s-channel $CHANNEL \
+--print-command \
+--datastore="${DATASTORE}" \
+--tls-san $LB_IP
+
+k3sup install --user root --ip $SERVER2 \
+--k3s-channel $CHANNEL \
+--print-command \
+--datastore="${DATASTORE}" \
+--tls-san $LB_IP
+
+k3sup install --user root --ip $SERVER3 \
+--k3s-channel $CHANNEL \
+--print-command \
+--datastore="${DATASTORE}" \
+--tls-san $LB_IP
+
+k3sup join --user root --server-ip $LB_IP --ip $AGENT1 \
+--k3s-channel $CHANNEL \
+--print-command
+
+k3sup join --user root --server-ip $LB_IP --ip $AGENT2 \
+--k3s-channel $CHANNEL \
+--print-command
+
+
+

There will be a kubeconfig file created in the current working directory with the +IP address of the LoadBalancer set for kubectl to use.

+
    +
  • +

    Check the nodes have joined:

    +
    export KUBECONFIG=`pwd`/kubeconfig
    +kubectl get node
    +
    +
  • +
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/other-tools/kubernetes/k3s/k3s/index.html b/other-tools/kubernetes/k3s/k3s/index.html new file mode 100644 index 00000000..5f1c0c8c --- /dev/null +++ b/other-tools/kubernetes/k3s/k3s/index.html @@ -0,0 +1,5210 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

K3s

+

Features

+
    +
  • +

    Lightweight certified K8s distro

    +
  • +
  • +

    Built for production operations

    +
  • +
  • +

    40MB binary, 250MB memory consumption

    +
  • +
  • +

    Single process w/ integrated K8s master, Kubelet, and containerd

    +
  • +
  • +

    Supports not only etcd to hold the cluster state, but also SQLite + (for single-node, simpler setups) or external DBs like MySQL and PostgreSQL

    +
  • +
  • +

    Open source project

    +
  • +
+

Components and architecture

+

K3s Components and architecture

+
    +
  • +

    High-Availability K3s Server with an External DB:

    +

    K3s Components and architecture or, K3s Components and architecture

    +

    For this kind of high availability k3s setup read this.

    +
  • +
+

Pre-requisite

+

We will need 1 control plane (master) node and 2 worker nodes to create a single control-plane Kubernetes cluster using K3s. We are using the following settings for this purpose:

+
    +
  • +

    1 Linux machine for master, ubuntu-22.04-x86_64 or your choice of Ubuntu OS + image, cpu-su.2 flavor with 2vCPU, 8GB RAM, 20GB storage - also + assign Floating IP + to the master node.

    +
  • +
  • +

    2 Linux machines for worker, ubuntu-22.04-x86_64 or your choice of Ubuntu OS + image, cpu-su.1 flavor with 1vCPU, 4GB RAM, 20GB storage.

    +
  • +
  • +

    ssh access to all machines: Read more here + on how to set up SSH on your remote VMs.

    +
  • +
+

Networking

+

The K3s server needs port 6443 to be accessible by all nodes.

+

The nodes need to be able to reach other nodes over UDP port 8472 when Flannel +VXLAN overlay networking is used. The node should not listen on any other port. K3s +uses reverse tunneling such that the nodes make outbound connections to the server +and all kubelet traffic runs through that tunnel. However, if you do not use Flannel +and provide your own custom CNI, then port 8472 is not needed by K3s.

+

If you wish to utilize the metrics server, you will need to open port 10250 +on each node.

+

If you plan on achieving high availability with embedded etcd, server nodes +must be accessible to each other on ports 2379 and 2380.
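
The ports above can be opened either from the Horizon dashboard (as the next step describes) or from the openstack CLI. A command-line sketch might look like the following; k3s-servers is a hypothetical security group name, the CLI is assumed to be configured for your project, and the 192.168.0.0/24 CIDR should be replaced with your own internal network:

+
openstack security group create k3s-servers
+# Kubernetes API server, reachable from all nodes
+openstack security group rule create --protocol tcp --dst-port 6443 --remote-ip 192.168.0.0/24 k3s-servers
+# Flannel VXLAN overlay (keep this restricted to the internal network)
+openstack security group rule create --protocol udp --dst-port 8472 --remote-ip 192.168.0.0/24 k3s-servers
+# Kubelet metrics, needed only if you use the metrics server
+openstack security group rule create --protocol tcp --dst-port 10250 --remote-ip 192.168.0.0/24 k3s-servers
+
+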

+
    +
  • +

    Create 1 security group with appropriate Inbound Rules for K3s Server Nodes + that will be used by all 3 nodes:

    +

    Inbound Rules for K3s Server Nodes

    +
    +

    Important Note

    +

    The VXLAN overlay networking port on nodes should not be exposed to the world +as it opens up your cluster network to be accessed by anyone. Run your nodes +behind a firewall/security group that disables access to port 8472.

    +
    +
  • +
  • +

    Set up a unique hostname on each machine using the following command:

    +
    echo "<node_internal_IP> <host_name>" >> /etc/hosts
    +hostnamectl set-hostname <host_name>
    +
    +

    For example:

    +
    echo "192.168.0.235 k3s-master" >> /etc/hosts
    +hostnamectl set-hostname k3s-master
    +
    +
  • +
+

In this step, you will setup the following nodes:

+
    +
  • +

    k3s-master

    +
  • +
  • +

    k3s-worker1

    +
  • +
  • +

    k3s-worker2

    +
  • +
+

The below steps will be performed on all the above mentioned nodes:

+
    +
  • +

    SSH into all the 3 machines

    +
  • +
  • +

    Switch as root: sudo su

    +
  • +
  • +

    Update the repositories and packages:

    +
    apt-get update && apt-get upgrade -y
    +
    +
  • +
  • +

    Install curl and apt-transport-https

    +
    apt-get update && apt-get install -y apt-transport-https curl
    +
    +
  • +
+
+

Install Docker

+
    +
  • +

    Install container runtime - docker

    +
    apt-get install docker.io -y
    +
    +
  • +
  • +

    Configure the Docker daemon, in particular to use systemd for the management + of the container’s cgroups

    +
    cat <<EOF | sudo tee /etc/docker/daemon.json
    +{
    +"exec-opts": ["native.cgroupdriver=systemd"]
    +}
    +EOF
    +
    +systemctl enable --now docker
    +usermod -aG docker ubuntu
    +systemctl daemon-reload
    +systemctl restart docker
    +
    +
  • +
+
+

Configure K3s to bootstrap the cluster on master node

+

Run the below command on the master node i.e. k3s-master that you want to setup +as control plane.

+
    +
  • +

    SSH into k3s-master machine

    +
  • +
  • +

    Switch to root user: sudo su

    +
  • +
  • +

    Execute the below command to initialize the cluster:

    +
    curl -sfL https://get.k3s.io | sh -s - --kubelet-arg 'cgroup-driver=systemd' \
    +--node-taint CriticalAddonsOnly=true:NoExecute --docker
    +
    +

    OR, if you don't want to use docker as the container runtime, just run the installer without supplying the --docker argument.

    +
    curl -sfL https://get.k3s.io | sh -
    +
    +
  • +
+

After running this installation:

+
    +
  • +

    The K3s service will be configured to automatically restart after node reboots + or if the process crashes or is killed

    +
  • +
  • +

    Additional utilities will be installed, including kubectl, crictl, ctr, + k3s-killall.sh, and k3s-uninstall.sh

    +
  • +
  • +

    A kubeconfig file will be written to /etc/rancher/k3s/k3s.yaml and the kubectl + installed by K3s will automatically use it.

    +
  • +
+

To check if the service installed successfully, you can use:

+
systemctl status k3s
+
+

The output looks like:

+

K3s Active Master Status

+

OR,

+
k3s --version
+kubectl version
+
+
+

Note

+

If you want to taint the node so that no pods are scheduled on it after installation, then run: kubectl taint nodes <master_node_name> k3s-controlplane=true:NoExecute, i.e. kubectl taint nodes k3s-master k3s-controlplane=true:NoExecute

+
+

You can check if the master node is working by:

+
k3s kubectl get nodes
+
+NAME         STATUS   ROLES                  AGE   VERSION
+k3s-master   Ready    control-plane,master   37s   v1.21.5+k3s2
+
+
kubectl config get-clusters
+
+NAME
+default
+
+
kubectl cluster-info
+
+Kubernetes control plane is running at https://127.0.0.1:6443
+CoreDNS is running at https://127.0.0.1:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
+Metrics-server is running at https://127.0.0.1:6443/api/v1/namespaces/kube-system/services/https:metrics-server:/proxy
+
+To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
+
+
kubectl get namespaces
+
+NAME              STATUS   AGE
+default           Active   27m
+kube-system       Active   27m
+kube-public       Active   27m
+kube-node-lease   Active   27m
+
+
kubectl get endpoints -n kube-system
+
+NAME                    ENDPOINTS                                  AGE
+kube-dns                10.42.0.4:53,10.42.0.4:53,10.42.0.4:9153   27m
+metrics-server          10.42.0.3:443                              27m
+rancher.io-local-path   <none>                                     27m
+
+
kubectl get pods -n kube-system
+
+NAME                                      READY   STATUS    RESTARTS   AGE
+helm-install-traefik-crd-ql7j2            0/1     Pending   0          32m
+helm-install-traefik-mr65j                0/1     Pending   0          32m
+coredns-7448499f4d-x57z7                  1/1     Running   0          32m
+metrics-server-86cbb8457f-cg2fs           1/1     Running   0          32m
+local-path-provisioner-5ff76fc89d-kdfcl   1/1     Running   0          32m
+
+

You need to extract a token from the master that will be used to join the nodes +to the master.

+

On the master node:

+
sudo cat /var/lib/rancher/k3s/server/node-token
+
+

You will then obtain a token that looks like:

+
K1097aace305b0c1077fc854547f34a598d2::server:6cc9fbb6c5c9de96f37fb14b8
+
+
+

Configure K3s on worker nodes to join the cluster

+

Run the below command on both of the worker nodes i.e. k3s-worker1 and k3s-worker2 +that you want to join the cluster.

+
    +
  • +

    SSH into the k3s-worker1 and k3s-worker2 machines

    +
  • +
  • +

    Switch to root user: sudo su

    +
  • +
  • +

    Execute the below command to join the cluster using the token obtained from + the master node:

    +

    To install K3s on worker nodes and add them to the cluster, run the installation +script with the K3S_URL and K3S_TOKEN environment variables. Here is an example +showing how to join a worker node:

    +
    curl -sfL https://get.k3s.io | K3S_URL=https://<Master-Internal-IP>:6443 \
    +K3S_TOKEN=<Join_Token> sh -
    +
    +

    Where <Master-Internal-IP> is the Internal IP of the master node and <Join_Token> +is the token obtained from the master node.

    +

    For example:

    +
    curl -sfL https://get.k3s.io | K3S_URL=https://192.168.0.154:6443 \
    +K3S_TOKEN=K1019827f88b77cc5e1dce04d692d445c1015a578dafdc56aca829b2f
    +501df9359a::server:1bf0d61c85c6dac6d5a0081da55f44ba sh -
    +
    +

    You can verify if the k3s-agent on both of the worker nodes is running by:

    +
    systemctl status k3s-agent
    +
    +

    The output looks like:

    +

    K3s Active Agent Status

    +
  • +
+
+

To verify that our nodes have successfully been added to the cluster, run the +following command on master node:

+
k3s kubectl get nodes
+
+

OR,

+
k3s kubectl get nodes -o wide
+
+

Your output should look like:

+
k3s kubectl get nodes
+
+NAME          STATUS   ROLES                  AGE     VERSION
+k3s-worker1   Ready    <none>                 5m16s   v1.21.5+k3s2
+k3s-worker2   Ready    <none>                 5m5s    v1.21.5+k3s2
+k3s-master    Ready    control-plane,master   9m33s   v1.21.5+k3s2
+
+

This shows that we have successfully set up our K3s cluster, and it is ready for us to deploy applications to it.

+
+

Deploying Nginx using deployment

+
    +
  • +

    Create a deployment nginx.yaml on master node

    +
    vi nginx.yaml
    +
    +

    The nginx.yaml looks like this:

    +
    apiVersion: apps/v1
    +kind: Deployment
    +metadata:
    +  name: mysite
    +  labels:
    +    app: mysite
    +spec:
    +  replicas: 1
    +  selector:
    +    matchLabels:
    +      app: mysite
    +  template:
    +    metadata:
    +      labels:
    +        app : mysite
    +    spec:
    +      containers:
    +        - name : mysite
    +          image: nginx
    +          ports:
    +            - containerPort: 80
    +
    +
    kubectl apply -f nginx.yaml
    +
    +
  • +
  • +

    Verify the nginx pod is in Running state:

    +
    sudo k3s kubectl get pods --all-namespaces
    +
    +
  • +
  • +

    Scale the pods to available agents:

    +
    sudo k3s kubectl scale --replicas=2 deploy/mysite
    +
    +
  • +
  • +

    View all deployment status:

    +
    sudo k3s kubectl get deploy mysite
    +
    +NAME     READY   UP-TO-DATE   AVAILABLE   AGE
    +mysite   2/2     2            2           85s
    +
    +
  • +
  • +

    Delete the nginx deployment and pod:

    +
    sudo k3s kubectl delete -f nginx.yaml
    +
    +

    OR,

    +
    sudo k3s kubectl delete deploy mysite
    +
    +
    +

    Note

    +
    +

    Instead of manually applying any new deployment yaml, you can just copy the yaml file to the /var/lib/rancher/k3s/server/manifests/ folder, i.e. sudo cp nginx.yaml /var/lib/rancher/k3s/server/manifests/. This will automatically deploy the newly copied deployment on your cluster.

    +
  • +
+

Deploy Addons to K3s

+

K3s is a lightweight Kubernetes distribution that doesn't come packaged with all the usual tools, but you can install them separately.

+
    +
  • +

    Install Helm Commandline tool on K3s:

    +

    i. Download the latest version of Helm commandline tool using wget from +this page.

    +
    wget https://get.helm.sh/helm-v3.7.0-linux-amd64.tar.gz
    +
    +

    ii. Unpack it:

    +
    tar -zxvf helm-v3.7.0-linux-amd64.tar.gz
    +
    +

    iii. Find the helm binary in the unpacked directory, and move it to its desired +destination

    +
    mv linux-amd64/helm /usr/bin/helm
    +chmod +x /usr/bin/helm
    +
    +

    OR,

    +

    Using Snap:

    +
    snap install helm --classic
    +
    +

    OR,

    +

    Using Apt (Debian/Ubuntu):

    +
    curl https://baltocdn.com/helm/signing.asc | sudo apt-key add -
    +sudo apt-get install apt-transport-https --yes
    +echo "deb https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
    +sudo apt-get update
    +sudo apt-get install helm
    +
    +
  • +
  • +

    Verify the Helm installation:

    +
    helm version
    +
    +version.BuildInfo{Version:"v3.7.0", GitCommit:"eeac83883cb4014fe60267ec63735
    +70374ce770b", GitTreeState:"clean", GoVersion:"go1.16.8"}
    +
    +
  • +
  • +

    Add the helm chart repository to allow installation of applications using helm:

    +
    helm repo add stable https://charts.helm.sh/stable
    +helm repo update
    +
    +
  • +
+
+

Deploy A Sample Nginx Application using Helm

+

Nginx can be used as a web proxy to expose ingress +web traffic routes in and out of the cluster.

+
    +
  • +

    You can install "nginx web-proxy" using Helm:

    +
    export KUBECONFIG=/etc/rancher/k3s/k3s.yaml
    +helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
    +helm repo list
    +helm repo update
    +helm install stable ingress-nginx/ingress-nginx --namespace kube-system \
    +    --set defaultBackend.enabled=false --set controller.publishService.enabled=true
    +
    +
  • +
  • +

    We can test if the application has been installed by:

    +
    k3s kubectl get pods -n kube-system -l app=nginx-ingress -o wide
    +
    +NAME   READY STATUS  RESTARTS AGE  IP        NODE    NOMINATED NODE  READINESS GATES
    +nginx.. 1/1  Running 0        19m  10.42.1.5 k3s-worker1   <none>      <none>
    +
    +
  • +
  • +

    We have successfully deployed the nginx web-proxy on k3s. Go to a browser and visit http://<Master-Floating-IP>, i.e. http://128.31.25.246, to check the nginx default page. (See the Ingress sketch just after this list for how to route traffic to your own Services.)

    +
  • +
+
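
Once the ingress-nginx controller above is running, a minimal Ingress manifest can route external traffic to a Service inside the cluster. The sketch below is illustrative only: my-ingress, my-service and my-app.example.com are hypothetical names, and it assumes a Service named my-service exposing port 80 already exists in the default namespace.

+
# my-ingress.yaml (illustrative sketch)
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: my-ingress
+spec:
+  ingressClassName: nginx
+  rules:
+    - host: my-app.example.com
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: my-service
+                port:
+                  number: 80
+
+

Applying it with kubectl apply -f my-ingress.yaml tells the controller to route requests for that host to the backing Service.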

Upgrade K3s Using the Installation Script

+

To upgrade K3s from an older version you can re-run the installation script using +the same flags, for example:

+
curl -sfL https://get.k3s.io | sh -
+
+

This will upgrade to a newer version in the stable channel by default.

+

If you want to upgrade to a newer version in a specific channel (such as latest) +you can specify the channel:

+
curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=latest sh -
+
+

If you want to upgrade to a specific version you can run the following command:

+
curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=vX.Y.Z-rc1 sh -
+
+

From a non-root user's terminal, to install the latest version you do not need to pass INSTALL_K3S_VERSION; by default the latest version is installed.

+
curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--write-kubeconfig-mode 644" \
+    sh -
+
+
+

Note

+

For more about "How to use flags and environment variables", read this.

+
+

Restarting K3s

+

Restarting K3s is supported by the installation script for systemd and OpenRC.

+

Using systemd:

+

To restart servers manually:

+
sudo systemctl restart k3s
+
+

To restart agents manually:

+
sudo systemctl restart k3s-agent
+
+

Using OpenRC:

+

To restart servers manually:

+
sudo service k3s restart
+
+

To restart agents manually:

+
sudo service k3s-agent restart
+
+

Uninstalling

+

If you installed K3s with the help of the install.sh script, an uninstall script +is generated during installation. The script is created on your master node at +/usr/bin/k3s-uninstall.sh or as k3s-agent-uninstall.sh on your worker nodes.

+

To remove K3s on the worker nodes, execute:

+
sudo /usr/bin/k3s-agent-uninstall.sh
+sudo rm -rf /var/lib/rancher
+
+

To remove k3s on the master node, execute:

+
sudo /usr/bin/k3s-uninstall.sh
+sudo rm -rf /var/lib/rancher
+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/other-tools/kubernetes/kind/index.html b/other-tools/kubernetes/kind/index.html new file mode 100644 index 00000000..d99df47a --- /dev/null +++ b/other-tools/kubernetes/kind/index.html @@ -0,0 +1,4697 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+
+
+ + + +
+
+
+ + + +
+
+ + + + + + + +

Kind

+

Pre-requisite

+

We will need 1 VM to create a single node Kubernetes cluster using kind. We are using the following settings for this purpose:

+
    +
  • +

    1 Linux machine, almalinux-9-x86_64, cpu-su.2 flavor with 2vCPU, 8GB RAM, + 20GB storage - also assign Floating IP + to this VM.

    +
  • +
  • +

    Set up a unique hostname on the machine using the following command:

    +
    echo "<node_internal_IP> <host_name>" >> /etc/hosts
    +hostnamectl set-hostname <host_name>
    +
    +

    For example:

    +
    echo "192.168.0.167 kind" >> /etc/hosts
    +hostnamectl set-hostname kind
    +
    +
  • +
+

Install docker on AlmaLinux

+

Run the below command on the AlmaLinux VM:

+
    +
  • +

    SSH into kind machine

    +
  • +
  • +

    Switch to root user: sudo su

    +
  • +
  • +

    Execute the below commands to install Docker:

    +

    First remove the container-tools module, which includes stable versions of podman, buildah, skopeo, runc, conmon, etc.; those packages and their dependencies will be removed along with the module. If this module is not removed, it will conflict with Docker. Note that Red Hat recommends Podman on RHEL 8.

    +
    dnf module remove container-tools
    +
    +dnf update -y
    +
    +dnf config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo
    +
    +dnf install docker-ce docker-ce-cli containerd.io docker-compose-plugin
    +
    +systemctl start docker
    +systemctl enable --now docker
    +systemctl status docker
    +
    +docker -v
    +
    +
  • +
+

Install kubectl on AlmaLinux

+
curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
+sudo install -o root -g root -m 0755 kubectl /usr/bin/kubectl
+chmod +x /usr/bin/kubectl
+
+
    +
  • +

    Test to ensure that the kubectl is installed:

    +
    kubectl version --client
    +
    +
  • +
+

Install kind

+
curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-amd64
+chmod +x ./kind
+mv ./kind /usr/bin
+
+
which kind
+
+/bin/kind
+
+
kind version
+
+kind v0.11.1 go1.16.4 linux/amd64
+
+
    +
  • +

    To communicate with the cluster, just give the cluster name as a context in kubectl:

    +
    kind create cluster --name k8s-kind-cluster1
    +
    +Creating cluster "k8s-kind-cluster1" ...
    +✓ Ensuring node image (kindest/node:v1.21.1) 🖼
    +✓ Preparing nodes 📦
    +✓ Writing configuration 📜
    +✓ Starting control-plane 🕹️
    +✓ Installing CNI 🔌
    +✓ Installing StorageClass 💾
    +Set kubectl context to "kind-k8s-kind-cluster1"
    +You can now use your cluster with:
    +
    +kubectl cluster-info --context kind-k8s-kind-cluster1
    +
    +Have a nice day! 👋
    +
    +
  • +
  • +

    Get the cluster details:

    +
    kubectl cluster-info --context kind-k8s-kind-cluster1
    +
    +Kubernetes control plane is running at https://127.0.0.1:38646
    +CoreDNS is running at https://127.0.0.1:38646/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
    +
    +To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
    +
    +
    kubectl get all
    +
    +NAME                TYPE       CLUSTER-IP  EXTERNAL-IP  PORT(S)  AGE
    +service/kubernetes  ClusterIP  10.96.0.1   <none>       443/TCP  5m25s
    +
    +
    kubectl get nodes
    +
    +NAME                             STATUS  ROLES                AGE    VERSION
    +k8s-kind-cluster1-control-plane  Ready  control-plane,master  5m26s  v1.21.1
    +
    +
  • +
+
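
As an optional smoke test before moving on, you can run a throwaway pod against the new cluster and remove it afterwards (the pod name test-nginx is arbitrary):

+
kubectl run test-nginx --image=nginx
+kubectl get pods
+kubectl delete pod test-nginx
+
+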

Deleting a Cluster

+

If you created a cluster with kind create cluster, then deleting it is equally simple; just pass the same cluster name you used at creation time:

+
kind delete cluster --name k8s-kind-cluster1
+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/index.html b/other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/index.html new file mode 100644 index 00000000..c2f0fc53 --- /dev/null +++ b/other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/index.html @@ -0,0 +1,5707 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Highly Available Kubernetes Cluster using kubeadm

+

Objectives

+
    +
  • +

    Install a multi control-plane(master) Kubernetes cluster

    +
  • +
  • +

    Install a Pod network on the cluster so that your Pods can talk to each other

    +
  • +
  • +

    Deploy and test a sample app

    +
  • +
  • +

    Deploy K8s Dashboard to view all cluster's components

    +
  • +
+

Components and architecture

+

This shows the components and architecture of a highly-available, production-grade Kubernetes cluster.

+

Components and architecture

+

You can learn about each component from Kubernetes Components.

+

Pre-requisite

+

You will need 2 control-plane (master) nodes and 2 worker nodes to create a multi-master Kubernetes cluster using kubeadm. You are going to use the following setup for this purpose:

+
    +
  • +

    2 Linux machines for master, ubuntu-20.04-x86_64 or your choice of Ubuntu OS + image, cpu-su.2 flavor with 2vCPU, 8GB RAM, 20GB storage.

    +
  • +
  • +

    2 Linux machines for worker, ubuntu-20.04-x86_64 or your choice of Ubuntu OS + image, cpu-su.1 flavor with 1vCPU, 4GB RAM, 20GB storage - also + assign Floating IPs + to both of the worker nodes.

    +
  • +
  • +

    1 Linux machine for loadbalancer, ubuntu-20.04-x86_64 or your choice of Ubuntu + OS image, cpu-su.1 flavor with 1vCPU, 4GB RAM, 20GB storage.

    +
  • +
  • +

    ssh access to all machines: Read more here + on how to setup SSH to your remote VMs.

    +
  • +
  • +

    Create 2 security groups with appropriate ports and protocols:

    +
  • +
+

i. To be used by the master nodes:

+

Control plane ports and protocols

+

ii. To be used by the worker nodes:

+

Worker node ports and protocols

+
    +
  • +

    Set up a unique hostname on each machine using the following command:

    +
    echo "<node_internal_IP> <host_name>" >> /etc/hosts
    +hostnamectl set-hostname <host_name>
    +
    +

    For example:

    +
    echo "192.168.0.167 loadbalancer" >> /etc/hosts
    +hostnamectl set-hostname loadbalancer
    +
    +
  • +
+

Steps

+
    +
  1. +

    Prepare the Loadbalancer node to communicate with the two master nodes' + apiservers on their IPs via port 6443.

    +
  2. +
  3. +

    Do the following on all the nodes except the Loadbalancer node:

    +
      +
    • +

      Disable swap.

      +
    • +
    • +

      Install kubelet and kubeadm.

      +
    • +
    • +

      Install container runtime - you will be using containerd.

      +
    • +
    +
  4. +
  5. +

    Initiate kubeadm control plane configuration on one of the master nodes.

    +
  6. +
  7. +

    Save the new master and worker node join commands with the token.

    +
  8. +
  9. +

    Join the second master node to the control plane using the join command.

    +
  10. +
  11. +

    Join the worker nodes to the control plane using the join command.

    +
  12. +
  13. +

    Configure kubeconfig($HOME/.kube/config) on loadbalancer node.

    +
  14. +
  15. +

    Install kubectl on Loadbalancer node.

    +
  16. +
  17. +

    Install CNI network plugin i.e. Flannel on Loadbalancer node.

    +
  18. +
  19. +

    Validate all cluster components and nodes are visible on Loadbalancer node.

    +
  20. +
  21. +

    Deploy a sample app and validate the app from Loadbalancer node.

    +
  22. +
+
+

Setting up loadbalancer

+

You will use HAPROXY as the primary loadbalancer, but you can use other options as well. This node will not be part of the K8s cluster itself; it will sit outside the cluster and interact with the cluster over the configured ports.

+

You have 2 master nodes, which means a user can connect to either of the 2 apiservers. The loadbalancer will be used to load balance between the 2 apiservers.

+
    +
  • +

    Login to the loadbalancer node

    +
  • +
  • +

    Switch as root - sudo su

    +
  • +
  • +

    Update your repository and your system

    +
    sudo apt-get update && sudo apt-get upgrade -y
    +
    +
  • +
  • +

    Install haproxy

    +
    sudo apt-get install haproxy -y
    +
    +
  • +
  • +

    Edit haproxy configuration

    +
    vi /etc/haproxy/haproxy.cfg
    +
    +

    Add the below lines to create a frontend configuration for loadbalancer -

    +
    frontend fe-apiserver
    +bind 0.0.0.0:6443
    +mode tcp
    +option tcplog
    +default_backend be-apiserver
    +
    +

    Add the below lines to create a backend configuration for master1 and master2 +nodes at port 6443.

    +
    +

    Note

    +

    6443 is the default port of kube-apiserver

    +
    +
    backend be-apiserver
    +mode tcp
    +option tcplog
    +option tcp-check
    +balance roundrobin
    +default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
    +
    +    server master1 10.138.0.15:6443 check
    +    server master2 10.138.0.16:6443 check
    +
    +

    Here - master1 and master2 are the hostnames of the master nodes and +10.138.0.15 and 10.138.0.16 are the corresponding internal IP addresses.

    +
  • +
  • +

    Ensure haproxy config file is correctly formatted:

    +
    haproxy -c -q -V -f /etc/haproxy/haproxy.cfg
    +
    +
  • +
  • +

    Restart and Verify haproxy

    +
    systemctl restart haproxy
    +systemctl status haproxy
    +
    +

    Ensure haproxy is in running status.

    +

    Run nc command as below:

    +
    nc -v localhost 6443
    +Connection to localhost 6443 port [tcp/*] succeeded!
    +
    +
    +

    Note

    +

    If you see failures for master1 and master2 connectivity, you can ignore +them for time being as you have not yet installed anything on the servers.

    +
    +
  • +
+
+

Install kubeadm, kubelet and containerd on master and worker nodes

+

kubeadm will not install or manage kubelet or kubectl for you, so you will +need to ensure they match the version of the Kubernetes control plane you want kubeadm +to install for you. You will install these packages on all of your machines:

+

kubeadm: the command to bootstrap the cluster.

+

kubelet: the component that runs on all of the machines in your cluster and +does things like starting pods and containers.

+

kubectl: the command line util to talk to your cluster.

+

In this step, you will install kubelet and kubeadm on the below nodes

+
    +
  • +

    master1

    +
  • +
  • +

    master2

    +
  • +
  • +

    worker1

    +
  • +
  • +

    worker2

    +
  • +
+

The below steps will be performed on all the above mentioned nodes:

+
    +
  • +

    SSH into all the 4 machines

    +
  • +
  • +

    Update the repositories and packages:

    +
    sudo apt-get update && sudo apt-get upgrade -y
    +
    +
  • +
  • +

    Turn off swap

    +
    swapoff -a
    +sudo sed -i '/ swap / s/^/#/' /etc/fstab
    +
    +
  • +
  • +

    Install curl and apt-transport-https

    +
    sudo apt-get update && sudo apt-get install -y apt-transport-https curl
    +
    +
  • +
  • +

    Download the Google Cloud public signing key and add key to verify releases

    +
    curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    +
    +
  • +
  • +

    add kubernetes apt repo

    +
    cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
    +deb https://apt.kubernetes.io/ kubernetes-xenial main
    +EOF
    +
    +
  • +
  • +

    Install kubelet and kubeadm

    +
    sudo apt-get update
    +sudo apt-get install -y kubelet kubeadm
    +
    +
  • +
  • +

    apt-mark hold is used so that these packages will not be updated/removed automatically

    +
    sudo apt-mark hold kubelet kubeadm
    +
    +
  • +
+
+
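
Optionally, you can confirm on each node that the packages were installed and are now held back from automatic upgrades before moving on:

+
kubeadm version
+kubelet --version
+apt-mark showhold
+
+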

Install the container runtime i.e. containerd on master and worker nodes

+

To run containers in Pods, Kubernetes uses a container runtime.

+

By default, Kubernetes uses the Container Runtime Interface (CRI) to interface +with your chosen container runtime.

+
    +
  • +

    Install container runtime - containerd

    +

    The first thing to do is configure the persistent loading of the necessary containerd modules. Forwarding IPv4 and letting iptables see bridged traffic is done with the following command:

    +
    cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
    +overlay
    +br_netfilter
    +EOF
    +
    +sudo modprobe overlay
    +sudo modprobe br_netfilter
    +
    +
  • +
  • +

    Ensure net.bridge.bridge-nf-call-iptables is set to 1 in your sysctl config:

    +
    # sysctl params required by setup, params persist across reboots
    +cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
    +net.bridge.bridge-nf-call-iptables  = 1
    +net.bridge.bridge-nf-call-ip6tables = 1
    +net.ipv4.ip_forward                 = 1
    +EOF
    +
    +
  • +
  • +

    Apply sysctl params without reboot:

    +
    sudo sysctl --system
    +
    +
  • +
  • +

    Install the necessary dependencies with:

    +
    sudo apt install -y curl gnupg2 software-properties-common apt-transport-https ca-certificates
    +
    +
  • +
  • +

    The containerd.io packages in DEB and RPM formats are distributed by Docker. + Add the required GPG key with:

    +
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    +sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    +
    +

    It's now time to Install and configure containerd:

    +
    sudo apt update -y
    +sudo apt install -y containerd.io
    +containerd config default | sudo tee /etc/containerd/config.toml
    +
    +# Reload the systemd daemon with
    +sudo systemctl daemon-reload
    +
    +# Start containerd
    +sudo systemctl restart containerd
    +sudo systemctl enable --now containerd
    +
    +

    You can verify containerd is running with the command:

    +
    sudo systemctl status containerd
    +
    +
  • +
+
+

Configure kubeadm to bootstrap the cluster

+

You will start off by initializing only one master node. For this purpose, you will choose master1 to initialize the first control plane, but you could also do the same on master2.

+
    +
  • +

    SSH into master1 machine

    +
  • +
  • +

    Switch to root user: sudo su

    +
    +

    Configuring the kubelet cgroup driver

    +

    From 1.22 onwards, if you do not set the cgroupDriver field under KubeletConfiguration, kubeadm defaults it to systemd. So you do not need to do anything here by default, but if you want to change it you can refer to this documentation.

    +
    +
  • +
  • +

    Execute the below command to initialize the cluster:

    +
    kubeadm config images pull
    +kubeadm init --control-plane-endpoint
    +"LOAD_BALANCER_IP_OR_HOSTNAME:LOAD_BALANCER_PORT" --upload-certs --pod-network-cidr=10.244.0.0/16
    +
    +

    Here, you can use either the IP address or the hostname of the loadbalancer in place of LOAD_BALANCER_IP_OR_HOSTNAME. In this setup the hostname of the loadbalancer server is not resolvable from the master1 node, so instead of using a hostname that cannot be resolved across your network, you will use the IP address of the loadbalancer server.

    +

    The LOAD_BALANCER_PORT is the frontend configuration port defined in the HAPROXY configuration. For this, you have kept the port as 6443, which is the default apiserver port.

    +
    +

    Important Note

    +

    The --pod-network-cidr value depends upon which CNI plugin you are going to use, so be very careful while setting this CIDR value. In our case, you are going to use the Flannel CNI network plugin, so you will use --pod-network-cidr=10.244.0.0/16. If you opt to use the Calico CNI network plugin, then you need to use --pod-network-cidr=192.168.0.0/16, and if you opt to use Weave Net, there is no need to pass this parameter.

    +
    +

    For example, our Flannel CNI network plugin based kubeadm init command, with the loadbalancer node's internal IP 192.168.0.167, looks like below:

    +
    kubeadm config images pull
    +kubeadm init --control-plane-endpoint "192.168.0.167:6443" --upload-certs --pod-network-cidr=10.244.0.0/16
    +
    +

    Save the output in a secure file for future use. It shows a unique token to join the control plane. The output from kubeadm init should look like below:

    +
    Your Kubernetes control-plane has initialized successfully!
    +
    +To start using your cluster, you need to run the following as a regular user:
    +
    +mkdir -p $HOME/.kube
    +sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    +sudo chown $(id -u):$(id -g) $HOME/.kube/config
    +
    +Alternatively, if you are the root user, you can run:
    +
    +export KUBECONFIG=/etc/kubernetes/admin.conf
    +
    +You should now deploy a pod network to the cluster.
    +Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
    +https://kubernetes.io/docs/concepts/cluster-administration/addons/
    +
    +You can now join any number of the control-plane node running the following
    +command on each worker nodes as root:
    +
    +kubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \
    +    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee3
    +    7ab9834567333b939458a5bfb5 \
    +    --control-plane --certificate-key 824d9a0e173a810416b4bca7038fb33b616108c17abcbc5eaef8651f11e3d146
    +
    +Please note that the certificate-key gives access to cluster sensitive data, keep
    +it secret!
    +As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you
    +can use "kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
    +
    +Then you can join any number of worker nodes by running the following on each as
    +root:
    +
    +kubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \
    +    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee37ab9834567333b939458a5bfb5
    +
    +

    The output consists of 3 major tasks:

    +

    A. Set up kubeconfig on the current master node: as you are running as the root user, you need to run the following command:

    +
    export KUBECONFIG=/etc/kubernetes/admin.conf
    +
    +

    We need to run the below commands as a normal user to use the kubectl from terminal.

    +
    mkdir -p $HOME/.kube
    +sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    +sudo chown $(id -u):$(id -g) $HOME/.kube/config
    +
    +

    Now the machine is initialized as master.

    +
    +

    Warning

    +

    Kubeadm signs the certificate in the admin.conf to have +Subject: O = system:masters, CN = kubernetes-admin. system:masters is a +break-glass, super user group that bypasses the authorization layer +(e.g. RBAC). Do not share the admin.conf file with anyone and instead +grant users custom permissions by generating them a kubeconfig file using +the kubeadm kubeconfig user command.

    +
    +

    B. Setup a new control plane (master) i.e. master2 by running following +command on master2 node:

    +
    kubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \
    +    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e1
    +        5ee37ab9834567333b939458a5bfb5 \
    +    --control-plane --certificate-key 824d9a0e173a810416b4bca7038fb33b616108c17abcbc5eaef8651f11e3d146
    +
    +

    C. Join worker nodes running following command on individual worker nodes:

    +
    kubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \
    +    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee37ab9834567333b939458a5bfb5
    +
    +
    +

    Important Note

    +

    Your output will be different from what is provided here. While performing the rest of the demo, ensure that you are executing the command provided by your output, and don't copy and paste from here.

    +
    +

    If you do not have the token, you can get it by running the following command +on the control-plane node:

    +
    kubeadm token list
    +
    +

    The output is similar to this:

    +
    TOKEN     TTL  EXPIRES      USAGES           DESCRIPTION            EXTRA GROUPS
    +8ewj1p... 23h  2018-06-12   authentication,  The default bootstrap  system:
    +                            signing          token generated by     bootstrappers:
    +                                            'kubeadm init'.         kubeadm:
    +                                                                    default-node-token
    +
    +

    If you missed the join command, execute kubeadm token create --print-join-command on the master node to recreate the token along with the join command.

    +
    root@master:~$ kubeadm token create --print-join-command
    +
    +kubeadm join 10.2.0.4:6443 --token xyzeyi.wxer3eg9vj8hcpp2 \
    +--discovery-token-ca-cert-hash sha256:ccfc92b2a31b002c3151cdbab77ff4dc32ef13b213fa3a9876e126831c76f7fa
    +
    +

    By default, tokens expire after 24 hours. If you are joining a node to the cluster +after the current token has expired, you can create a new token by running the +following command on the control-plane node:

    +
    kubeadm token create
    +
    +

    The output is similar to this: +5didvk.d09sbcov8ph2amjw

    +

    We can use this new token to join:

    +
    kubeadm join <master-ip>:<master-port> --token <token> \
    +    --discovery-token-ca-cert-hash sha256:<hash>
    +
    +
  • +
+
+
    +
  • +

    SSH into master2

    +
  • +
  • +

    Switch to root user:sudo su

    +
  • +
  • +

    Check the command provided by the output of master1:

    +

    You can now use the below command to add another control-plane node(master) to +the control plane:

    +
    kubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb
    +    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee3
    +    7ab9834567333b939458a5bfb5 \
    +    --control-plane --certificate-key 824d9a0e173a810416b4bca7038fb33b616108c17abcbc5eaef8651f11e3d146
    +
    +
  • +
  • +

    Execute the kubeadm join command for control plane on master2

    +

    Your output should look like:

    +
    This node has joined the cluster and a new control plane instance was created:
    +
    +* Certificate signing request was sent to apiserver and approval was received.
    +* The Kubelet was informed of the new secure connection details.
    +* Control plane (master) label and taint were applied to the new node.
    +* The Kubernetes control plane instances scaled up.
    +* A new etcd member was added to the local/stacked etcd cluster.
    +
    +
  • +
+

Now that you have initialized both the masters, you can work on bootstrapping the worker nodes.

+
    +
  • +

    SSH into worker1 and worker2

    +
  • +
  • +

    Switch to root user on both the machines: sudo su

    +
  • +
  • +

    Check the output given by the init command on master1 to join worker node:

    +
    kubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \
    +    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee37ab9834567333b939458a5bfb5
    +
    +
  • +
  • +

    Execute the above command on both the nodes:

    +
  • +
  • +

    Your output should look like:

    +
    This node has joined the cluster:
    +* Certificate signing request was sent to apiserver and a response was received.
    +* The Kubelet was informed of the new secure connection details.
    +
    +
  • +
+
+

Configure kubeconfig on loadbalancer node

+

Now that you have configured the master and the worker nodes, it's time to configure kubeconfig (.kube) on the loadbalancer node. It is completely up to you whether you want to use the loadbalancer node for kubeconfig; kubeconfig can also be set up externally on a separate machine which has access to the loadbalancer node. For the purpose of this demo you will use the loadbalancer node to host kubeconfig and kubectl.

+
    +
  • +

    SSH into loadbalancer node

    +
  • +
  • +

    Switch to root user: sudo su

    +
  • +
  • +

    Create a directory: .kube at $HOME of root user

    +
    mkdir -p $HOME/.kube
    +
    +
  • +
  • +

    SCP configuration file from any one master node to loadbalancer node

    +
    scp master1:/etc/kubernetes/admin.conf $HOME/.kube/config
    +
    +
    +

    Important Note

    +

    If you haven't set up an ssh connection between the master node and the loadbalancer, you can manually copy the contents of the file /etc/kubernetes/admin.conf from the master1 node and then paste it into the $HOME/.kube/config file on the loadbalancer node. Ensure that the kubeconfig file path is $HOME/.kube/config on the loadbalancer node.

    +
    +
  • +
  • +

    Provide appropriate ownership to the copied file

    +
    chown $(id -u):$(id -g) $HOME/.kube/config
    +
    +
  • +
+
+

Install kubectl

+
    +
  • +

    Install kubectl binary

    +

    kubectl: the command line util to talk to your cluster.

    +
    snap install kubectl --classic
    +
    +

    This outputs:

    +
    kubectl 1.26.1 from Canonical✓ installed
    +
    +
  • +
  • +

    Verify the cluster

    +
    kubectl get nodes
    +
    +NAME STATUS ROLES AGE VERSION
    +master1 NotReady control-plane,master 21m v1.26.1
    +master2 NotReady control-plane,master 15m v1.26.1
    +worker1 Ready <none> 9m17s v1.26.1
    +worker2 Ready <none> 9m25s v1.26.1
    +
    +
  • +
+
+

Install CNI network plugin

+

CNI overview

+

Managing a network where containers can interoperate efficiently is very +important. Kubernetes has adopted the Container Network Interface(CNI) +specification for managing network resources on a cluster. This relatively +simple specification makes it easy for Kubernetes to interact with a wide range +of CNI-based software solutions. Using this CNI plugin allows Kubernetes pods to +have the same IP address inside the pod as they do on the VPC network. Make sure +the configuration corresponds to the Pod CIDR specified in the kubeadm +configuration file if applicable.

+

You must deploy a CNI based Pod network add-on so that your Pods can communicate +with each other. Cluster DNS (CoreDNS) will not start up before a network is +installed. To verify you can run this command: kubectl get po -n kube-system:

+

You should see the following output. The two coredns-* pods will be in a Pending state; this is the expected behavior. Once we install the network plugin, they will be in a Running state.

+

Output Example:

+
root@loadbalancer:~$ kubectl get po -n kube-system
+ NAME                               READY  STATUS   RESTARTS  AGE
+coredns-558bd4d5db-5jktc             0/1   Pending   0        10m
+coredns-558bd4d5db-xdc5x             0/1   Pending   0        10m
+etcd-master1                         1/1   Running   0        11m
+kube-apiserver-master1               1/1   Running   0        11m
+kube-controller-manager-master1      1/1   Running   0        11m
+kube-proxy-5jfh5                     1/1   Running   0        10m
+kube-scheduler-master1               1/1   Running   0        11m
+
+

Supported CNI options

+

To read more about the currently supported base CNI solutions for Kubernetes +read here +and also read this.

+

The below command can be run on the Loadbalancer node to install the CNI plugin:

+
kubectl apply -f https://github.com/coreos/flannel/raw/master/Documentation/kube-flannel.yml
+
+

As you passed --pod-network-cidr=10.244.0.0/16 with kubeadm init, this should work for the Flannel CNI.
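
After applying the manifest, you can watch the Flannel and CoreDNS pods come up; once the flannel pods are Running, the coredns-* pods should move from Pending to Running. Namespace names vary slightly between Flannel manifest versions, so the filter below simply looks across all namespaces:

+
kubectl get pods --all-namespaces -o wide | grep -E 'flannel|coredns'
+
+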

+
+

Using Other CNI Options

+

For Calico CNI plugin to work correctly, you need to pass +--pod-network-cidr=192.168.0.0/16 with kubeadm init and then you can run: +kubectl apply -f https://docs.projectcalico.org/v3.8/manifests/calico.yaml

+
+
+

For Weave Net CNI plugin to work correctly, you don't need to pass +--pod-network-cidr with kubeadm init and then you can run: +kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl + version | base64 | tr -d '\n')"

+

Dual Network:

+

It is highly recommended to follow an internal/external network layout for your cluster, as shown in this diagram:

+

Dual Network Diagram

+

To enable this, just give two different names to the internal and external interfaces, according to your distro of choice's naming scheme:

+
external_interface: eth0
+internal_interface: eth1
+
+

Also, you can decide here what CIDR your cluster should use:

+
cluster_cidr: 10.43.0.0/16
+service_cidr: 10.44.0.0/16
+
+

Once you have successfully installed the Flannel CNI component on your cluster, you can verify your HA cluster by running:

+
kubectl get nodes
+
+NAME      STATUS   ROLES                    AGE   VERSION
+master1   Ready    control-plane,master     22m   v1.26.1
+master2   Ready    control-plane,master     17m   v1.26.1
+worker1   Ready    <none>                   10m   v1.26.1
+worker2   Ready    <none>                   10m   v1.26.1
+
+
+

Deploy A Sample Nginx Application From one of the master nodes

+

Now that we have all the components to make the cluster and applications work, let's deploy a sample Nginx application and see if we can access it over a NodePort, which uses the port range 30000-32767.

+

The below command can be run on one of the master nodes:

+
kubectl run nginx --image=nginx --port=80
+kubectl expose pod nginx --port=80 --type=NodePort
+
+

To check which NodePort has been opened for the Nginx service, run:

+
kubectl get svc
+
+

The output will show:

+

Running Services

+

Once the deployment is up, you should be able to access the Nginx home page on +the allocated NodePort from either of the worker nodes' Floating IP.

+

To check which worker node is serving nginx, you can check NODE column +running the following command:

+
kubectl get pods --all-namespaces --output wide
+
+

OR,

+
kubectl get pods -A -o wide
+
+

This will show like below:

+

Nginx Pod and Worker

+

Go to a browser and visit http://<Worker-Floating-IP>:<NodePort>, i.e. http://128.31.25.246:32713, to check the nginx default page. Here <Worker-Floating-IP> corresponds to the Floating IP of the worker node running the nginx pod, i.e. worker2.

+

In this example,

+

nginx default page

+
+
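
If you prefer to verify from the command line instead of a browser, a simple curl against the same address should confirm the page is served; substitute your own worker Floating IP and the NodePort reported by kubectl get svc:

+
curl -I http://<Worker-Floating-IP>:<NodePort>
+# an HTTP/1.1 200 OK response means the sample app is reachable from outside the cluster
+
+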

Deploy A K8s Dashboard

+

You are going to set up K8dash/Skooner to view a dashboard that shows all your K8s cluster components.

+
    +
  • +

    SSH into loadbalancer node

    +
  • +
  • +

    Switch to root user: sudo su

    +
  • +
  • +

    Apply available deployment by running the following command:

    +
    kubectl apply -f https://raw.githubusercontent.com/skooner-k8s/skooner/master/kubernetes-skooner-nodeport.yaml
    +
    +

    This will map Skooner port 4654 to a randomly selected port on the running +node.

    +

    The assigned NodePort can be found running:

    +
    kubectl get svc --namespace=kube-system
    +
    +

    OR,

    +
    kubectl get po,svc -n kube-system
    +
    +

    Skooner Service Port

    +

    To check which worker node is serving skooner-*, you can check NODE column +running the following command:

    +
    kubectl get pods --all-namespaces --output wide
    +
    +

    OR,

    +
    kubectl get pods -A -o wide
    +
    +

    This will show like below:

    +

    Skooner Pod and Worker

    +

    Go to a browser and visit http://<Worker-Floating-IP>:<NodePort>, i.e. http://128.31.25.246:30495, to check the Skooner dashboard page. Here <Worker-Floating-IP> corresponds to the Floating IP of the worker node running the skooner-* pod, i.e. worker2.

    +

    Skooner Dashboard

    +
  • +
+

Setup the Service Account Token to access the Skooner Dashboard:

+

The first (and easiest) option is to create a dedicated service account. Run the +following commands:

+
    +
  • +

    Create the service account in the current namespace (we assume default)

    +
    kubectl create serviceaccount skooner-sa
    +
    +
  • +
  • +

    Give that service account root on the cluster

    +
    kubectl create clusterrolebinding skooner-sa --clusterrole=cluster-admin --serviceaccount=default:skooner-sa
    +
    +
  • +
  • +

    Create a secret to hold the token for the SA:

    +
    kubectl apply -f - <<EOF
    +apiVersion: v1
    +kind: Secret
    +metadata:
    +    name: skooner-sa-token
    +    annotations:
    +        kubernetes.io/service-account.name: skooner-sa
    +type: kubernetes.io/service-account-token
    +EOF
    +
    +
    +

    Information

    +

    Since 1.22, this type of Secret is no longer used to mount credentials into +Pods, and obtaining tokens via the TokenRequest API +is recommended instead of using service account token Secret objects. Tokens +obtained from the TokenRequest API are more secure than ones stored in Secret +objects, because they have a bounded lifetime and are not readable by other API +clients. You can use the kubectl create token command to obtain a token from +the TokenRequest API. For example: kubectl create token skooner-sa, where +skooner-sa is service account name.

    +
    +
  • +
  • +

    Find the secret that was created to hold the token for the SA

    +
    kubectl get secrets
    +
    +
  • +
  • +

    Show the contents of the secret to extract the token

    +
    kubectl describe secret skooner-sa-token
    +
    +
  • +
+

Copy the token value from the secret detail and enter it into the login screen +to access the dashboard.

+
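
If you would rather extract just the raw token value (for example, to paste it straight into the login screen), a one-liner like the following should work, assuming the skooner-sa-token secret created above:

+
kubectl get secret skooner-sa-token -o jsonpath='{.data.token}' | base64 --decode
+
+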

Watch Demo Video showing how to setup the cluster

+

Here's a recorded demo video on how to set up an HA K8s cluster using kubeadm as explained above.

+
+

Very Important: Certificates Renewal

+

Client certificates generated by kubeadm expire after one year unless the +Kubernetes version is upgraded or the certificates are manually renewed.

+
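
Before renewing anything, you can check when the current certificates actually expire; kubeadm ships a built-in command for this, run on a control-plane node:

+
kubeadm certs check-expiration
+
+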

To renew certificates manually, you can use the kubeadm certs renew command with +the appropriate command line options. After running the command, you should +restart the control plane Pods.

+

kubeadm certs renew can renew any specific certificate or, with the subcommand +all, it can renew all of them, as shown below:

+
kubeadm certs renew all
+
+

Once renewing the certificates is done, you must restart the kube-apiserver, kube-controller-manager, kube-scheduler and etcd so that they can use the new certificates, by running:

+
systemctl restart kubelet
+
+

Then, update the new kube config file:

+
export KUBECONFIG=/etc/kubernetes/admin.conf
+sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+
+
+

Don't Forget to Update the older kube config file

+

Update wherever you are using the older kube config to connect with the cluster.

+
+

Clean Up

+
    +
  • +

    To view the Cluster info:

    +
    kubectl cluster-info
    +
    +
  • +
  • +

    To delete your local references to the cluster:

    +
    kubectl config delete-cluster
    +
    +
  • +
+

How to Remove the node?

+

Talking to the control-plane node with the appropriate credentials, run:

+
kubectl drain <node name> --delete-emptydir-data --force --ignore-daemonsets
+
+
    +
  • +

    Before removing the node, reset the state installed by kubeadm:

    +
    kubeadm reset
    +
    +

    The reset process does not reset or clean up iptables rules or IPVS tables. If +you wish to reset iptables, you must do so manually:

    +
    iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
    +
    +

    If you want to reset the IPVS tables, you must run the following command:

    +
    ipvsadm -C
    +
    +
  • +
  • +

    Now remove the node:

    +
    kubectl delete node <node name>
    +
    +
  • +
+

If you wish to start over, run kubeadm init or kubeadm join with the +appropriate arguments.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/index.html b/other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/index.html new file mode 100644 index 00000000..b5ef39d4 --- /dev/null +++ b/other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/index.html @@ -0,0 +1,5495 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Creating a Single Master cluster with kubeadm

+

Objectives

+
    +
  • +

    Install a single control-plane(master) Kubernetes cluster

    +
  • +
  • +

    Install a Pod network on the cluster so that your Pods can talk to each other

    +
  • +
  • +

    Deploy and test a sample app

    +
  • +
  • +

    Deploy K8s Dashboard to view all cluster's components

    +
  • +
+

Components and architecture

+

Components and architecture

+

You can learn about each component from Kubernetes Components.

+

Pre-requisite

+

We will need 1 control-plane (master) node and 2 worker nodes to create a single control-plane Kubernetes cluster using kubeadm. We are using the following settings for this purpose:

+
    +
  • +

    1 Linux machine for master, ubuntu-20.04-x86_64, cpu-su.2 flavor with 2vCPU, + 8GB RAM, 20GB storage.

    +
  • +
  • +

    2 Linux machines for worker, ubuntu-20.04-x86_64, cpu-su.1 flavor with 1vCPU, + 4GB RAM, 20GB storage - also assign Floating IPs + to both of the worker nodes.

    +
  • +
  • +

    ssh access to all machines: Read more here + on how to set up SSH on your remote VMs.

    +
  • +
  • +

    Create 2 security groups with appropriate ports and protocols:

    +
  • +
+

i. To be used by the master nodes:

+

Control plane ports and protocols

+

ii. To be used by the worker nodes:

+

Worker node ports and protocols

+
    +
  • +

    Set up a unique hostname on each machine using the following command:

    +
    echo "<node_internal_IP> <host_name>" >> /etc/hosts
    +hostnamectl set-hostname <host_name>
    +
    +

    For example:

    +
    echo "192.168.0.167 master" >> /etc/hosts
    +hostnamectl set-hostname master
    +
    +
  • +
+

Steps

+
    +
  1. +

    Disable swap on all nodes.

    +
  2. +
  3. +

    Install kubeadm, kubelet, and kubectl on all the nodes.

    +
  4. +
  5. +

    Install container runtime on all nodes- you will be using containerd.

    +
  6. +
  7. +

    Initiate kubeadm control plane configuration on the master node.

    +
  8. +
  9. +

    Save the worker node join command with the token.

    +
  10. +
  11. +

    Install CNI network plugin i.e. Flannel on master node.

    +
  12. +
  13. +

    Join the worker node to the master node (control plane) using the join command.

    +
  14. +
  15. +

    Validate all cluster components and nodes are visible on master node.

    +
  16. +
  17. +

    Deploy a sample app and validate the app from master node.

    +
  18. +
+

Install kubeadm, kubelet and containerd on master and worker nodes

+

kubeadm will not install or manage kubelet or kubectl for you, so you will +need to ensure they match the version of the Kubernetes control plane you want kubeadm +to install for you. You will install these packages on all of your machines:

+

kubeadm: the command to bootstrap the cluster.

+

kubelet: the component that runs on all of the machines in your cluster and +does things like starting pods and containers.

+

kubectl: the command line util to talk to your cluster.

+

In this step, you will install kubelet and kubeadm on the below nodes

+
    +
  • +

    master

    +
  • +
  • +

    worker1

    +
  • +
  • +

    worker2

    +
  • +
+

The below steps will be performed on all the above mentioned nodes:

+
    +
  • +

    SSH into all the 3 machines

    +
  • +
  • +

    Update the repositories and packages:

    +
    sudo apt-get update && sudo apt-get upgrade -y
    +
    +
  • +
  • +

    Turn off swap

    +
    swapoff -a
    +sudo sed -i '/ swap / s/^/#/' /etc/fstab
    +
    +
  • +
  • +

    Install curl and apt-transport-https

    +
    sudo apt-get update && sudo apt-get install -y apt-transport-https curl
    +
    +
  • +
  • +

    Download the Google Cloud public signing key and add key to verify releases

    +
    curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
    +
    +
  • +
  • +

    add kubernetes apt repo

    +
    cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list
    +deb https://apt.kubernetes.io/ kubernetes-xenial main
    +EOF
    +
    +
  • +
  • +

    Install kubelet, kubeadm, and kubectl

    +
    sudo apt-get update
    +sudo apt-get install -y kubelet kubeadm kubectl
    +
    +
  • +
  • +

    apt-mark hold is used so that these packages will not be updated/removed automatically

    +
    sudo apt-mark hold kubelet kubeadm kubectl
    +
    +
  • +
+
+

Install the container runtime i.e. containerd on master and worker nodes

+

To run containers in Pods, Kubernetes uses a container runtime.

+

By default, Kubernetes uses the Container Runtime Interface (CRI) to interface +with your chosen container runtime.

+
    +
  • +

    Install container runtime - containerd

    +

    The first thing to do is configure the persistent loading of the necessary containerd modules. Forwarding IPv4 and letting iptables see bridged traffic is done with the following command:

    +
    cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
    +overlay
    +br_netfilter
    +EOF
    +
    +sudo modprobe overlay
    +sudo modprobe br_netfilter
    +
    +
  • +
  • +

    Ensure net.bridge.bridge-nf-call-iptables is set to 1 in your sysctl config:

    +
    # sysctl params required by setup, params persist across reboots
    +cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
    +net.bridge.bridge-nf-call-iptables  = 1
    +net.bridge.bridge-nf-call-ip6tables = 1
    +net.ipv4.ip_forward                 = 1
    +EOF
    +
    +
  • +
  • +

    Apply sysctl params without reboot:

    +
    sudo sysctl --system
    +
    +
  • +
  • +

    Install the necessary dependencies with:

    +
    sudo apt install -y curl gnupg2 software-properties-common apt-transport-https ca-certificates
    +
    +
  • +
  • +

    The containerd.io packages in DEB and RPM formats are distributed by Docker. + Add the required GPG key with:

    +
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    +sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    +
    +

    It's now time to Install and configure containerd:

    +
    sudo apt update -y
    +sudo apt install -y containerd.io
    +containerd config default | sudo tee /etc/containerd/config.toml
    +
    +# Reload the systemd daemon with
    +sudo systemctl daemon-reload
    +
    +# Start containerd
    +sudo systemctl restart containerd
    +sudo systemctl enable --now containerd
    +
    +

    You can verify containerd is running with the command:

    +
    sudo systemctl status containerd
    +
    +
    +

    Configuring the kubelet cgroup driver

    +

    From 1.22 onwards, if you do not set the cgroupDriver field under KubeletConfiguration, kubeadm defaults it to systemd. So you do not need to do anything here by default, but if you want to change it you can refer to this documentation; a minimal configuration sketch is also shown after this list.

    +
    +
  • +
+
+
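If you do decide to set the cgroup driver explicitly rather than relying on the systemd default mentioned above, one way is to pass a small configuration file to kubeadm init. This is only a sketch under assumptions: the file name kubeadm-config.yaml is arbitrary, the kubernetesVersion shown is an example, and when using --config the advertise address and Pod network CIDR would also move into this file instead of being passed as flags.

    cat <<EOF > kubeadm-config.yaml
    apiVersion: kubeadm.k8s.io/v1beta3
    kind: ClusterConfiguration
    kubernetesVersion: v1.26.1
    ---
    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    cgroupDriver: systemd
    EOF
    # then: kubeadm init --config kubeadm-config.yaml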

Configure kubeadm to bootstrap the cluster on master node

+

Run the below commands on the master node, i.e. master, that you want to set up as the control plane.

+
    +
  • +

    SSH into master machine

    +
  • +
  • +

    Switch to root user: sudo su

    +
  • +
  • +

    Execute the below command to initialize the cluster:

    +
    export MASTER_IP=<Master-Internal-IP>
    +kubeadm config images pull
    +kubeadm init --apiserver-advertise-address=${MASTER_IP} --pod-network-cidr=10.244.0.0/16
    +
    +
    +

    Important Note

    +

    Please make sure you replace <Master-Internal-IP> with the correct Internal IP of the master node. The --pod-network-cidr value depends upon which CNI plugin you are going to use, so be careful while setting this CIDR value. In our case, you are going to use the Flannel CNI network plugin, so you will use: --pod-network-cidr=10.244.0.0/16. If you opt to use the Calico CNI network plugin, then you need to use: --pod-network-cidr=192.168.0.0/16, and if you opt to use Weave Net, there is no need to pass this parameter.

    +
    +

    For example, our Flannel CNI network plugin based kubeadm init command, with the master node's internal IP 192.168.0.167, looks like below:

    +

    For example:

    +
    export MASTER_IP=192.168.0.167
    +kubeadm config images pull
    +kubeadm init --apiserver-advertise-address=${MASTER_IP} --pod-network-cidr=10.244.0.0/16
    +
    +

    Save the output in a secure file for future use. It contains a unique token to join the control plane. The output from kubeadm init should look like below:

    +
    Your Kubernetes control-plane has initialized successfully!
    +
    +To start using your cluster, you need to run the following as a regular user:
    +
    +mkdir -p $HOME/.kube
    +sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    +sudo chown $(id -u):$(id -g) $HOME/.kube/config
    +
    +Alternatively, if you are the root user, you can run:
    +
    +export KUBECONFIG=/etc/kubernetes/admin.conf
    +
    +You should now deploy a pod network to the cluster.
    +Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
    +https://kubernetes.io/docs/concepts/cluster-administration/addons/
    +
    +You can now join any number of the control-plane node running the following
    +command on each worker nodes as root:
    +
    +kubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \
    +    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee3
    +    7ab9834567333b939458a5bfb5 \
    +    --control-plane --certificate-key 824d9a0e173a810416b4bca7038fb33b616108c17abcbc5eaef8651f11e3d146
    +
    +Please note that the certificate-key gives access to cluster sensitive data, keep
    +it secret!
    +As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you
    +can use "kubeadm init phase upload-certs --upload-certs" to reload certs afterward.
    +
    +Then you can join any number of worker nodes by running the following on each as
    +root:
    +
    +kubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \
    +    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee37ab9834567333b939458a5bfb5
    +
    +

    The output consists of 2 major tasks:

    +

    A. Set up kubeconfig on the current master node: As you are running as the root user, you need to run the following command:

    +
    export KUBECONFIG=/etc/kubernetes/admin.conf
    +
    +

    We need to run the below commands as a normal user to use kubectl from the terminal.

    +
    mkdir -p $HOME/.kube
    +sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    +sudo chown $(id -u):$(id -g) $HOME/.kube/config
    +
    +

    Now the machine is initialized as master.

    +
    +

    Warning

    +

    Kubeadm signs the certificate in the admin.conf to have +Subject: O = system:masters, CN = kubernetes-admin. system:masters is a +break-glass, super user group that bypasses the authorization layer +(e.g. RBAC). Do not share the admin.conf file with anyone and instead +grant users custom permissions by generating them a kubeconfig file using +the kubeadm kubeconfig user command.

    +
    +

    B. Join the worker nodes by running the following command on each individual worker node:

    +
    kubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \
    +    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee37ab9834567333b939458a5bfb5
    +
    +
    +

    Important Note

    +

    Your output will be different from what is provided here. While performing the rest of the demo, ensure that you are executing the command provided by your own output and don't copy and paste from here.

    +
    +

    If you do not have the token, you can get it by running the following command +on the control-plane node:

    +
    kubeadm token list
    +
    +

    The output is similar to this:

    +
    TOKEN     TTL  EXPIRES      USAGES           DESCRIPTION            EXTRA GROUPS
    +8ewj1p... 23h  2018-06-12   authentication,  The default bootstrap  system:
    +                            signing          token generated by     bootstrappers:
    +                                            'kubeadm init'.         kubeadm:
    +                                                                    default-node-token
    +
    +

    If you missed the join command, execute the following command, kubeadm token create --print-join-command, on the master node to recreate the token along with the join command.

    +
    root@master:~$ kubeadm token create --print-join-command
    +
    +kubeadm join 10.2.0.4:6443 --token xyzeyi.wxer3eg9vj8hcpp2 \
    +--discovery-token-ca-cert-hash sha256:ccfc92b2a31b002c3151cdbab77ff4dc32ef13b213fa3a9876e126831c76f7fa
    +
    +

    By default, tokens expire after 24 hours. If you are joining a node to the cluster +after the current token has expired, you can create a new token by running the +following command on the control-plane node:

    +
    kubeadm token create
    +
    +

    The output is similar to this: +5didvk.d09sbcov8ph2amjw

    +

    We can use this new token to join:

    +
    kubeadm join <master-ip>:<master-port> --token <token> \
    +    --discovery-token-ca-cert-hash sha256:<hash>
    +
    +
  • +
+
+

Now that you have initialized the master, you can work on bootstrapping the worker nodes.

+
    +
  • +

    SSH into worker1 and worker2

    +
  • +
  • +

    Switch to root user on both the machines: sudo su

    +
  • +
  • +

    Check the join command given in the output of the kubeadm init command on the master:

    +
    kubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \
    +    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee37ab9834567333b939458a5bfb5
    +
    +
  • +
  • +

    Execute the above command on both the nodes:

    +
  • +
  • +

    Your output should look like:

    +
    This node has joined the cluster:
    +* Certificate signing request was sent to apiserver and a response was received.
    +* The Kubelet was informed of the new secure connection details.
    +
    +
  • +
+
+

Validate all cluster components and nodes are visible on all nodes

+
    +
  • +

    Verify the cluster

    +
    kubectl get nodes
    +
    +NAME      STATUS        ROLES                  AGE     VERSION
    +master    NotReady      control-plane,master   21m     v1.26.1
    +worker1   Ready         <none>                 9m17s   v1.26.1
    +worker2   Ready         <none>                 9m25s   v1.26.1
    +
    +
  • +
+
+

Install CNI network plugin

+

CNI overview

+

Managing a network where containers can interoperate efficiently is very important. Kubernetes has adopted the Container Network Interface (CNI) specification for managing network resources on a cluster. This relatively simple specification makes it easy for Kubernetes to interact with a wide range of CNI-based software solutions. Using a CNI plugin allows Kubernetes Pods to have the same IP address inside the pod as they do on the VPC network. Make sure the configuration corresponds to the Pod CIDR specified in the kubeadm configuration, if applicable.

+

You must deploy a CNI based Pod network add-on so that your Pods can communicate +with each other. Cluster DNS (CoreDNS) will not start up before a network is +installed. To verify you can run this command: kubectl get po -n kube-system:

+

You should see output like the following. The two coredns-* pods will be in a Pending state; this is the expected behavior. Once we install the network plugin, they will move to a Running state.

+

Output Example:

+
root@master:~$ kubectl get po -n kube-system
+ NAME                               READY  STATUS   RESTARTS  AGE
+coredns-558bd4d5db-5jktc             0/1   Pending   0        10m
+coredns-558bd4d5db-xdc5x             0/1   Pending   0        10m
+etcd-master1                         1/1   Running   0        11m
+kube-apiserver-master1               1/1   Running   0        11m
+kube-controller-manager-master1      1/1   Running   0        11m
+kube-proxy-5jfh5                     1/1   Running   0        10m
+kube-scheduler-master1               1/1   Running   0        11m
+
+

Supported CNI options

+

To read more about the currently supported base CNI solutions for Kubernetes +read here +and also read this.

+

The below command can be run on the master node to install the CNI plugin:

+
kubectl apply -f https://github.com/coreos/flannel/raw/master/Documentation/kube-flannel.yml
+
+

As you passed --pod-network-cidr=10.244.0.0/16 with kubeadm init, this will work for the Flannel CNI.

+
+

Using Other CNI Options

+

For the Calico CNI plugin to work correctly, you need to pass --pod-network-cidr=192.168.0.0/16 with kubeadm init, and then you can run: kubectl apply -f https://docs.projectcalico.org/v3.8/manifests/calico.yaml

+
+
+

For the Weave Net CNI plugin to work correctly, you don't need to pass --pod-network-cidr with kubeadm init; you can then run: kubectl apply -f "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')"

+

Dual Network:

+

It is highly recommended to follow an internal/external network layout for your cluster, as shown in this diagram:

+

Dual Network Diagram

+

To enable this, just give two different names to the internal and external interfaces, according to the naming scheme of your distro of choice:

+
external_interface: eth0
+internal_interface: eth1
+
+

You can also decide here what CIDRs your cluster should use:

+
cluster_cidr: 10.43.0.0/16
+service_cidr: 10.44.0.0/16
+
+

Once you have successfully installed the Flannel CNI component in your cluster, you can verify your cluster by running:

+
kubectl get nodes
+
+NAME      STATUS   ROLES                    AGE   VERSION
+master    Ready    control-plane,master     22m   v1.26.1
+worker1   Ready    <none>                   10m   v1.26.1
+worker2   Ready    <none>                   10m   v1.26.1
+
+

Watch Recorded Video showing the above steps on setting up the cluster

+

Here’s a quick recorded demo video up to this point, where we successfully set up a single master K8s cluster using kubeadm.

+
+

Deploy A Sample Nginx Application From the master node

+

Now that we have all the components needed to make the cluster and applications work, let’s deploy a sample Nginx application and see if we can access it over a NodePort, which has a port range of 30000-32767.

+

The below commands can be run on the master node:

+
kubectl run nginx --image=nginx --port=80
+kubectl expose pod nginx --port=80 --type=NodePort
+
+

To check which NodePort is opened and serving the Nginx app, run:

+
kubectl get svc
+
+

The output will show:

+

Running Services

+

Once the deployment is up, you should be able to access the Nginx home page on +the allocated NodePort from either of the worker nodes' Floating IP.

+

To check which worker node is serving nginx, you can check NODE column +running the following command:

+
kubectl get pods --all-namespaces --output wide
+
+

OR,

+
kubectl get pods -A -o wide
+
+

This will show output like below:

+

Nginx Pod and Worker

+

Go to a browser and visit http://<Worker-Floating-IP>:<NodePort>, i.e. http://128.31.25.246:32713, to check the nginx default page. Here <Worker-Floating-IP> corresponds to the Floating IP of the worker node running the nginx pod, i.e. worker2.

+

For our example:

+

nginx default page

+
+

Deploy A K8s Dashboard

+

You are going to set up K8dash/Skooner to view a dashboard that shows all your K8s cluster components.

+
    +
  • +

    SSH into master node

    +
  • +
  • +

    Switch to root user: sudo su

    +
  • +
  • +

    Apply available deployment by running the following command:

    +
    kubectl apply -f https://raw.githubusercontent.com/skooner-k8s/skooner/master/kubernetes-skooner-nodeport.yaml
    +
    +

    This will map Skooner port 4654 to a randomly selected NodePort on the cluster nodes. The assigned NodePort can be found by running:

    +
    kubectl get svc --namespace=kube-system
    +
    +

    OR,

    +
    kubectl get po,svc -n kube-system
    +
    +

    Skooner Service Port

    +

    To check which worker node is serving skooner-*, you can check the NODE column by running the following command:

    +
    kubectl get pods --all-namespaces --output wide
    +
    +

    OR,

    +
    kubectl get pods -A -o wide
    +
    +

    This will show output like below:

    +

    Skooner Pod and Worker

    +

    Go to a browser and visit http://<Worker-Floating-IP>:<NodePort>, i.e. http://128.31.25.246:30495, to check the Skooner dashboard page. Here <Worker-Floating-IP> corresponds to the Floating IP of the worker node running the skooner-* pod, i.e. worker2.

    +

    Skooner Dashboard

    +
  • +
+

Set up the Service Account Token to access the Skooner Dashboard:

+

The first (and easiest) option is to create a dedicated service account. Run the +following commands:

+
    +
  • +

    Create the service account in the current namespace (we assume default)

    +
    kubectl create serviceaccount skooner-sa
    +
    +
  • +
  • +

    Give that service account cluster-admin (root) privileges on the cluster

    +
    kubectl create clusterrolebinding skooner-sa --clusterrole=cluster-admin --serviceaccount=default:skooner-sa
    +
    +
  • +
  • +

    Create a secret to hold the token for the SA:

    +
    kubectl apply -f - <<EOF
    +apiVersion: v1
    +kind: Secret
    +metadata:
    +    name: skooner-sa-token
    +    annotations:
    +        kubernetes.io/service-account.name: skooner-sa
    +type: kubernetes.io/service-account-token
    +EOF
    +
    +
    +

    Information

    +

    Since 1.22, this type of Secret is no longer used to mount credentials into Pods, and obtaining tokens via the TokenRequest API is recommended instead of using service account token Secret objects. Tokens obtained from the TokenRequest API are more secure than ones stored in Secret objects, because they have a bounded lifetime and are not readable by other API clients. You can use the kubectl create token command to obtain a token from the TokenRequest API, for example: kubectl create token skooner-sa, where skooner-sa is the service account name (see the short sketch after this list).

    +
    +
  • +
  • +

    Find the secret that was created to hold the token for the SA

    +
    kubectl get secrets
    +
    +
  • +
  • +

    Show the contents of the secret to extract the token

    +
    kubectl describe secret skooner-sa-token
    +
    +
  • +
+
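Alternatively, as noted in the information box above, you can skip the token Secret entirely and request a short-lived token from the TokenRequest API. A minimal sketch (the 24h duration is just an example):

    kubectl create token skooner-sa                 # prints a bounded-lifetime token to stdout
    kubectl create token skooner-sa --duration=24h  # optionally request a longer validity period

Either token can be pasted into the Skooner login screen in the same way as the Secret-based token.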

Copy the token value from the secret detail and enter it into the login screen +to access the dashboard.

+

Watch Demo Video showing how to deploy applications

+

Here’s a recorded demo video on how to deploy applications on top of the single master K8s cluster set up as explained above.

+
+

Very Important: Certificates Renewal

+

Client certificates generated by kubeadm expire after one year unless the +Kubernetes version is upgraded or the certificates are manually renewed.

+
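Before renewing anything, it can be useful to check how long the current certificates are still valid. kubeadm has a built-in check for this; run it on the control-plane node, for example:

    kubeadm certs check-expiration

The output lists each certificate together with its expiry date and remaining lifetime.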

To renew certificates manually, you can use the kubeadm certs renew command with +the appropriate command line options. After running the command, you should +restart the control plane Pods.

+

kubeadm certs renew can renew any specific certificate or, with the subcommand +all, it can renew all of them, as shown below:

+
kubeadm certs renew all
+
+

Once the certificates are renewed, you must restart the kube-apiserver, kube-controller-manager, kube-scheduler, and etcd so that they can use the new certificates, by running:

+
systemctl restart kubelet
+
+
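If the control plane Pods do not pick up the renewed certificates after the kubelet restart above, one documented approach is to briefly move the static Pod manifests out of the manifests directory and back, which forces the kubelet to recreate those Pods. A hedged sketch, assuming the default /etc/kubernetes/manifests path:

    mkdir -p /tmp/k8s-manifests
    mv /etc/kubernetes/manifests/*.yaml /tmp/k8s-manifests/
    sleep 20                                          # give the kubelet time to notice and stop the Pods
    mv /tmp/k8s-manifests/*.yaml /etc/kubernetes/manifests/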

Then, update the new kube config file:

+
export KUBECONFIG=/etc/kubernetes/admin.conf
+sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
+
+
+

Don't Forget to Update the older kube config file

+

Update wherever you are using the older kube config to connect with the cluster.

+
+

Clean Up

+
    +
  • +

    To view the Cluster info:

    +
    kubectl cluster-info
    +
    +
  • +
  • +

    To delete your local references to the cluster:

    +
    kubectl config delete-cluster
    +
    +
  • +
+

How to Remove the node?

+

Talking to the control-plane node with the appropriate credentials, run:

+
kubectl drain <node name> --delete-emptydir-data --force --ignore-daemonsets
+
+
    +
  • +

    Before removing the node, reset the state installed by kubeadm:

    +
    kubeadm reset
    +
    +

    The reset process does not reset or clean up iptables rules or IPVS tables. If +you wish to reset iptables, you must do so manually:

    +
    iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
    +
    +

    If you want to reset the IPVS tables, you must run the following command:

    +
    ipvsadm -C
    +
    +
  • +
  • +

    Now remove the node:

    +
    kubectl delete node <node name>
    +
    +
  • +
+

If you wish to start over, run kubeadm init or kubeadm join with the +appropriate arguments.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/other-tools/kubernetes/kubernetes/index.html b/other-tools/kubernetes/kubernetes/index.html new file mode 100644 index 00000000..e7868a70 --- /dev/null +++ b/other-tools/kubernetes/kubernetes/index.html @@ -0,0 +1,4665 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + +
+ +
+ + + +
+
+ + + + + + + +

Kubernetes Overview

+

Kubernetes, commonly known as K8s, is an open-source container orchestration tool for managing containerized cloud-native workloads and services in computing, networking, and storage infrastructure. K8s can help to deploy and manage containerized applications like platform-as-a-service (PaaS), batch processing workers, and microservices in the cloud at scale. It reduces cloud computing costs while simplifying the operation of resilient and scalable applications. While it is possible to install and manage Kubernetes on infrastructure that you manage, it is a time-consuming and complicated process. To make provisioning and deploying clusters much easier, we have listed a number of popular platforms and tools to set up your K8s cluster on your NERC OpenStack Project space.

+

Kubernetes Components & Architecture

+

A Kubernetes cluster consists of a set of worker machines, called nodes, that +run containerized applications. Every cluster has at least one worker node. The +worker node(s) host the Pods that are the components of the application workload.

+

The control plane or master manages the worker nodes and the Pods in the cluster. +In production environments, the control plane usually runs across multiple +computers and a cluster usually runs multiple nodes, providing fault-tolerance, +redundancy, and high availability.

+

Here's the diagram of a Kubernetes cluster with all the components tied together. +Kubernetes Components & Architecture

+

Kubernetes Basics workflow

+
    +
  1. +

    Create a Kubernetes cluster + Create a Kubernetes cluster

    +
  2. +
  3. +

    Deploy an app + Deploy an app

    +
  4. +
  5. +

    Explore your app + Explore your app

    +
  6. +
  7. +

    Expose your app publicly + Expose your app publicly

    +
  8. +
  9. +

    Scale up your app + Scale up your app

    +
  10. +
  11. +

    Update your app + Update your app

    +
  12. +
+
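The workflow above maps onto a handful of kubectl commands. The sketch below is only illustrative: the deployment name hello and the nginx image are arbitrary examples, and it assumes a running cluster with kubectl already configured.

    kubectl create deployment hello --image=nginx                  # 2. deploy an app
    kubectl get pods -o wide                                       # 3. explore your app
    kubectl expose deployment hello --type=NodePort --port=80      # 4. expose your app publicly
    kubectl scale deployment hello --replicas=3                    # 5. scale up your app
    kubectl set image deployment/hello nginx=nginx:1.25            # 6. update your app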

Development environment

+
    +
  1. +

    Minikube is a local Kubernetes cluster that focuses on making Kubernetes development and learning simple. Kubernetes may be started with just a single command, provided you have a Docker (or similarly compatible) container environment or a Virtual Machine environment. For more read this.

    +
  2. +
  3. +

    Kind is a tool for running + local Kubernetes clusters utilizing Docker container "nodes". It was built for + Kubernetes testing, but it may also be used for local development and continuous + integration. For more read this.

    +
  4. +
  5. +

    MicroK8s is the smallest, fastest, and most conformant + Kubernetes that tracks upstream releases and simplifies clustering. MicroK8s + is ideal for prototyping, testing, and offline development. + For more read this.

    +
  6. +
  7. +

    K3s is a certified Kubernetes distribution developed by Rancher Labs and now a CNCF sandbox project that fully implements the Kubernetes API in a single binary of less than 40MB. To do so, they got rid of a lot of additional drivers that didn't need to be in the core and could easily be replaced with add-ons. For more read this.

    +

    To setup a Multi-master HA K3s cluster using k3sup(pronounced ketchup) +read this.

    +

    To setup a Single-Node K3s Cluster using k3d read this +and if you would like to setup Multi-master K3s cluster setup using k3d +read this.

    +
  8. +
  9. +

    k0s is an all-inclusive Kubernetes distribution, + configured with all of the features needed to build a Kubernetes cluster simply + by copying and running an executable file on each target host. + For more read this.

    +
  10. +
+

Production environment

+

If your Kubernetes cluster has to run critical workloads, it must be configured as a resilient, highly available (HA), production-ready Kubernetes cluster. To set up a production-quality cluster, you can use the following deployment tools.

+
    +
  1. +

    Kubeadm performs the actions necessary to get a minimum viable, secure cluster up and running in a user-friendly way. To bootstrap a cluster with kubeadm, read this, and if you would like to set up a multi-master cluster using kubeadm, read this.

    +
  2. +
  3. +

    Kubespray helps to install a Kubernetes cluster on NERC OpenStack. Kubespray is a composition of Ansible playbooks, inventory, provisioning tools, and domain knowledge for generic OS/Kubernetes cluster configuration management tasks. To install Kubernetes with Kubespray, read this.

    +
  4. +
+

To choose a tool which best fits your use case, read this comparison.

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/other-tools/kubernetes/kubespray/index.html b/other-tools/kubernetes/kubespray/index.html new file mode 100644 index 00000000..094f07b6 --- /dev/null +++ b/other-tools/kubernetes/kubespray/index.html @@ -0,0 +1,4907 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Kubespray

+

Pre-requisite

+

We will need 1 control-plane (master) and 1 worker node to create a single control-plane Kubernetes cluster using Kubespray. We are using the following settings for this purpose:

+
    +
  • +

    1 Linux machine for Ansible master, ubuntu-22.04-x86_64 or your choice of Ubuntu + OS image, cpu-su.2 flavor with 2vCPU, 8GB RAM, 20GB storage.

    +
  • +
  • +

    1 Linux machine for master, ubuntu-22.04-x86_64 or your choice of Ubuntu + OS image, cpu-su.2 flavor with 2vCPU, 8GB RAM, 20GB storage - + also assign Floating IP + to the master node.

    +
  • +
  • +

    1 Linux machine for worker, ubuntu-22.04-x86_64 or your choice of Ubuntu OS image, cpu-su.1 flavor with 1vCPU, 4GB RAM, 20GB storage.

    +
  • +
  • +

    ssh access to all machines: Read more here + on how to set up SSH on your remote VMs.

    +
  • +
  • +

    To allow SSH from the Ansible master to all other nodes: Read more here. Generate an SSH key for the Ansible master node using:

    +
    ssh-keygen -t rsa
    +
    +Generating public/private rsa key pair.
    +Enter file in which to save the key (/root/.ssh/id_rsa):
    +Enter passphrase (empty for no passphrase):
    +Enter same passphrase again:
    +Your identification has been saved in /root/.ssh/id_rsa
    +Your public key has been saved in /root/.ssh/id_rsa.pub
    +The key fingerprint is:
    +SHA256:OMsKP7EmhT400AJA/KN1smKt6eTaa3QFQUiepmj8dxroot@ansible-master
    +The key's randomart image is:
    ++---[RSA 3072]----+
    +|=o.oo.           |
    +|.o...            |
    +|..=  .           |
    +|=o.= ...         |
    +|o=+.=.o SE       |
    +|.+*o+. o. .      |
    +|.=== +o. .       |
    +|o+=o=..          |
    +|++o=o.           |
    ++----[SHA256]-----+
    +
    +

    Copy and append the contents of the SSH public key, i.e. ~/.ssh/id_rsa.pub, to the other nodes' ~/.ssh/authorized_keys file. Please make sure you are logged in as the root user (by doing sudo su) before you copy this public key to the end of the ~/.ssh/authorized_keys file of the other master and worker nodes. This will allow ssh <other_nodes_internal_ip> from the Ansible master node's terminal (a short sketch of this is shown after this list).

    +
  • +
  • +

    Create 2 security groups with appropriate ports and protocols:

    +

    i. To be used by the master nodes: +Control plane ports and protocols

    +

    ii. To be used by the worker nodes: +Worker node ports and protocols

    +
  • +
  • +

    Set up a unique hostname on each machine using the following commands:

    +
    echo "<node_internal_IP> <host_name>" >> /etc/hosts
    +hostnamectl set-hostname <host_name>
    +
    +

    For example:

    +
    echo "192.168.0.224 ansible_master" >> /etc/hosts
    +hostnamectl set-hostname ansible_master
    +
    +
  • +
+
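Referring back to the SSH key step above, the copy-and-append can be done along these lines. This is only a sketch: <contents_of_id_rsa.pub> and <other_node_internal_ip> are placeholders, and it assumes you paste the key manually because password-based SSH between the VMs may not be enabled.

    # on the Ansible master node (as root), print the public key and copy it:
    cat ~/.ssh/id_rsa.pub

    # on each of the other nodes (as root), append that exact single line:
    echo "<contents_of_id_rsa.pub>" >> ~/.ssh/authorized_keys

    # back on the Ansible master, verify passwordless SSH now works:
    ssh <other_node_internal_ip> hostname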

In this step, you will update packages and disable swap on all 3 nodes:

+
    +
  • +

    1 Ansible Master Node - ansible_master

    +
  • +
  • +

    1 Kubernetes Master Node - kubspray_master

    +
  • +
  • +

    1 Kubernetes Worker Node - kubspray_worker1

    +
  • +
+

The below steps will be performed on all the above mentioned nodes:

+
    +
  • +

    SSH into all the 3 machines

    +
  • +
  • +

    Switch as root: sudo su

    +
  • +
  • +

    Update the repositories and packages:

    +
    apt-get update && apt-get upgrade -y
    +
    +
  • +
  • +

    Turn off swap

    +
    swapoff -a
    +sed -i '/ swap / s/^/#/' /etc/fstab
    +
    +
  • +
+
+

Configure Kubespray on ansible_master node using Ansible Playbook

+

Run the below commands on the Ansible master node, i.e. ansible_master, from which you will configure and deploy the cluster.

+
    +
  • +

    SSH into ansible_master machine

    +
  • +
  • +

    Switch to root user: sudo su

    +
  • +
  • +

    Execute the below command to initialize the cluster:

    +
  • +
  • +

    Install Python3 and upgrade pip to pip3:

    +
    apt install python3-pip -y
    +pip3 install --upgrade pip
    +python3 -V && pip3 -V
    +pip -V
    +
    +
  • +
  • +

    Clone the Kubespray git repository:

    +
    git clone https://github.com/kubernetes-sigs/kubespray.git
    +cd kubespray
    +
    +
  • +
  • +

    Install dependencies from requirements.txt:

    +
    pip install -r requirements.txt
    +
    +
  • +
  • +

    Copy inventory/sample as inventory/mycluster

    +
    cp -rfp inventory/sample inventory/mycluster
    +
    +
  • +
  • +

    Update Ansible inventory file with inventory builder:

    +

    This step needs a little care because we need to update hosts.yml with the nodes' IPs.

    +

    Now we are going to declare a variable "IPS" for storing the IP addresses of the other K8s nodes, i.e. kubspray_master (192.168.0.130) and kubspray_worker1 (192.168.0.32).

    +
    declare -a IPS=(192.168.0.130 192.168.0.32)
    +CONFIG_FILE=inventory/mycluster/hosts.yml python3 \
    +    contrib/inventory_builder/inventory.py ${IPS[@]}
    +
    +

    This outputs:

    +
    DEBUG: Adding group all
    +DEBUG: Adding group kube_control_plane
    +DEBUG: Adding group kube_node
    +DEBUG: Adding group etcd
    +DEBUG: Adding group k8s_cluster
    +DEBUG: Adding group calico_rr
    +DEBUG: adding host node1 to group all
    +DEBUG: adding host node2 to group all
    +DEBUG: adding host node1 to group etcd
    +DEBUG: adding host node1 to group kube_control_plane
    +DEBUG: adding host node2 to group kube_control_plane
    +DEBUG: adding host node1 to group kube_node
    +DEBUG: adding host node2 to group kube_node
    +
    +
  • +
  • +

    After running the above commands, verify hosts.yml and its contents:

    +
    cat inventory/mycluster/hosts.yml
    +
    +

    The contents of the hosts.yml file should look like:

    +
    all:
    +    hosts:
    +        node1:
    +            ansible_host: 192.168.0.130
    +            ip: 192.168.0.130
    +            access_ip: 192.168.0.130
    +        node2:
    +            ansible_host: 192.168.0.32
    +            ip: 192.168.0.32
    +            access_ip: 192.168.0.32
    +    children:
    +        kube_control_plane:
    +            hosts:
    +                node1:
    +                node2:
    +        kube_node:
    +            hosts:
    +                node1:
    +                node2:
    +        etcd:
    +            hosts:
    +                node1:
    +        k8s_cluster:
    +            children:
    +                kube_control_plane:
    +                kube_node:
    +        calico_rr:
    +            hosts: {}
    +
    +
  • +
  • +

    Review and change parameters under inventory/mycluster/group_vars

    +
    cat inventory/mycluster/group_vars/all/all.yml
    +cat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
    +
    +
  • +
  • +

    It can be useful to set the following two variables to true in + inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml: kubeconfig_localhost + (to make a copy of kubeconfig on the host that runs Ansible in + { inventory_dir }/artifacts) and kubectl_localhost + (to download kubectl onto the host that runs Ansible in { bin_dir }).

    +
    +

    Very Important

    +

    As the Ubuntu 20 KVM kernel doesn't have the dummy module, we need to modify the following two variables in inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml: enable_nodelocaldns: false and kube_proxy_mode: iptables, which will disable the nodelocal dns cache and set the kube-proxy proxyMode to iptables, respectively (a possible way to set these is sketched after this list).

    +
    +
  • +
  • +

    Deploy Kubespray with Ansible Playbook - run the playbook as root user. + The option --become is required, as for example writing SSL keys in /etc/, + installing packages and interacting with various systemd daemons. Without + --become the playbook will fail to run!

    +
    ansible-playbook -i inventory/mycluster/hosts.yml --become --become-user=root cluster.yml
    +
    +
    +

    Note

    +

    Running the Ansible playbook takes some time, as it also depends on the network bandwidth.

    +
    +
  • +
+
+
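Referring back to the group_vars notes above (these edits belong before running cluster.yml), one possible way to flip those variables non-interactively is sketched below. It assumes the keys already exist in the default k8s-cluster.yml shipped by Kubespray, so confirm the result with the final grep.

    CFG=inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml
    sed -i 's/^enable_nodelocaldns: .*/enable_nodelocaldns: false/' $CFG
    sed -i 's/^kube_proxy_mode: .*/kube_proxy_mode: iptables/' $CFG
    grep -E 'enable_nodelocaldns|kube_proxy_mode' $CFG   # confirm the values before running the playbook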

Install kubectl on the Kubernetes master node, i.e. kubspray_master

+
    +
  • +

    Install kubectl binary

    +
    snap install kubectl --classic
    +
    +

    This outputs: kubectl 1.26.1 from Canonical✓ installed

    +
  • +
  • +

    Now verify the kubectl version:

    +
    kubectl version -o yaml
    +
    +
  • +
+
+

Validate all cluster components and nodes are visible on all nodes

+
    +
  • +

    Verify the cluster

    +
    kubectl get nodes
    +
    +NAME    STATUS   ROLES                  AGE     VERSION
    +node1   Ready    control-plane,master   6m7s    v1.26.1
    +node2   Ready    control-plane,master   5m32s   v1.26.1
    +
    +
  • +
+
+

Deploy A Hello Minikube Application

+
    +
  • +

    Use the kubectl create command to create a Deployment that manages a Pod. The + Pod runs a Container based on the provided Docker image.

    +
    kubectl create deployment hello-minikube --image=k8s.gcr.io/echoserver:1.4
    +
    +
    kubectl expose deployment hello-minikube --type=LoadBalancer --port=8080
    +
    +service/hello-minikube exposed
    +
    +
  • +
  • +

    View the deployments information:

    +
    kubectl get deployments
    +
    +NAME             READY   UP-TO-DATE   AVAILABLE   AGE
    +hello-minikube   1/1     1            1           50s
    +
    +
  • +
  • +

    View the port information:

    +
    kubectl get svc hello-minikube
    +
    +NAME             TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
    +hello-minikube   LoadBalancer   10.233.35.126   <pending>     8080:30723/TCP   40s
    +
    +
  • +
  • +

    Expose the service locally

    +
    kubectl port-forward svc/hello-minikube 30723:8080
    +
    +Forwarding from [::1]:30723 -> 8080
    +Forwarding from 127.0.0.1:30723 -> 8080
    +Handling connection for 30723
    +Handling connection for 30723
    +
    +
  • +
+

Go to a browser and visit http://<Master-Floating-IP>:8080, i.e. http://140.247.152.235:8080/, to check the hello minikube default page.

+

Clean up

+

Now you can clean up the app resources you created in your cluster:

+
kubectl delete service hello-minikube
+kubectl delete deployment hello-minikube
+
+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/other-tools/kubernetes/microk8s/index.html b/other-tools/kubernetes/microk8s/index.html new file mode 100644 index 00000000..3544434e --- /dev/null +++ b/other-tools/kubernetes/microk8s/index.html @@ -0,0 +1,4757 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Microk8s

+

Pre-requisite

+

We will need 1 VM to create a single node Kubernetes cluster using MicroK8s. We are using the following settings for this purpose:

+
    +
  • +

    1 Linux machine, ubuntu-22.04-x86_64 or your choice of Ubuntu OS image, + cpu-su.2 flavor with 2vCPU, 8GB RAM, 20GB storage - also assign Floating IP + to this VM.

    +
  • +
  • +

    Set up a unique hostname on the machine using the following commands:

    +
    echo "<node_internal_IP> <host_name>" >> /etc/hosts
    +hostnamectl set-hostname <host_name>
    +
    +

    For example:

    +
    echo "192.168.0.62 microk8s" >> /etc/hosts
    +hostnamectl set-hostname microk8s
    +
    +
  • +
+

Install MicroK8s on Ubuntu

+

Run the below command on the Ubuntu VM:

+
    +
  • +

    SSH into microk8s machine

    +
  • +
  • +

    Switch to root user: sudo su

    +
  • +
  • +

    Update the repositories and packages:

    +
    apt-get update && apt-get upgrade -y
    +
    +
  • +
  • +

    Install MicroK8s:

    +
    sudo snap install microk8s --classic
    +
    +
  • +
  • +

    Check the status while Kubernetes starts

    +
    microk8s status --wait-ready
    +
    +
  • +
  • +

    Turn on the services you want:

    +
    microk8s enable dns dashboard
    +
    +

    Try microk8s enable --help for a list of available services and optional features. +microk8s disable <name> turns off a service. For example other useful services +are: microk8s enable registry istio storage

    +
  • +
  • +

    Start using Kubernetes

    +
    microk8s kubectl get all --all-namespaces
    +
    +

    If you mainly use MicroK8s, you can make its kubectl the default one on your command line with alias mkctl="microk8s kubectl". Since it is a standard upstream kubectl, you can also drive other Kubernetes clusters with it by pointing to the respective kubeconfig file via the --kubeconfig argument (see the short sketch after this list).

    +
  • +
  • +

    Access the Kubernetes dashboard + UI:

    +

    Microk8s Dashboard Ports

    +

    As we see above, the kubernetes-dashboard service in the kube-system namespace has a ClusterIP of 10.152.183.73 and listens on TCP port 443. The ClusterIP is randomly assigned, so if you follow these steps on your host, make sure you check the IP address you got.

    +
    +

    Note

    +

    Another way to retrieve the default token used for dashboard access is:

    +
    token=$(microk8s kubectl -n kube-system get secret | grep default-token | cut -d " " -f1)
    +microk8s kubectl -n kube-system describe secret $token
    +
    +
    +
  • +
  • +

    Keep the kubernetes-dashboard running on the proxy to access it via a web browser:

    +
    microk8s dashboard-proxy
    +
    +Checking if Dashboard is running.
    +Dashboard will be available at https://127.0.0.1:10443
    +Use the following token to login:
    +eyJhbGc....
    +
    +
    +

    Important

    +

    This tells us the IP address of the Dashboard and the port. The values assigned to your Dashboard will differ. Please note the displayed PORT and the TOKEN that are required to access the kubernetes-dashboard. Make sure the exposed PORT is opened in Security Groups for the instance, following this guide.

    +
    +

    This will show the token used to log in to the Dashboard, which is served on the URL with the NodePort.

    +

    You'll need to wait a few minutes before the dashboard becomes available. If you open a web browser on the same desktop where you deployed MicroK8s and point it to https://<Floating-IP>:<PORT> (where PORT is the port assigned to the Dashboard, noted while running the above command), you'll need to accept the risk (because the Dashboard uses a self-signed certificate). Then, enter the previously noted TOKEN to access the kubernetes-dashboard.

    +

    The K8s Dashboard service

    +

    Once you enter the correct TOKEN, the kubernetes-dashboard is displayed and looks like below:

    +

    The K8s Dashboard service interface

    +
  • +
+
+
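As mentioned in the alias note above, the same kubectl bundled with MicroK8s can also drive other clusters. A small sketch (the alias only lasts for the current shell unless added to ~/.bash_aliases, and /path/to/other-kubeconfig is a placeholder):

    alias mkctl="microk8s kubectl"
    mkctl get nodes                                            # talks to the local MicroK8s cluster
    mkctl --kubeconfig=/path/to/other-kubeconfig get nodes     # talks to another cluster via its kubeconfig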

Information

+
    +
  • +

    Start and stop Kubernetes: Kubernetes is a collection of system services that talk to each other all the time. If you don't need them running in the background, then you will save battery by stopping them. microk8s start and microk8s stop will do those tasks for you (see the short example after this list).

    +
  • +
  • +

    To Reset the infrastructure to a clean state: microk8s reset

    +
  • +
+
+
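The start/stop commands mentioned in the note above are enough for day-to-day use, for example:

    microk8s stop                   # halt all MicroK8s services to save resources
    microk8s start                  # bring the cluster back up
    microk8s status --wait-ready    # wait until Kubernetes reports ready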

Deploy a Container using the Kubernetes-Dashboard

+

Click on the + button in the top left corner of the main window. On the resulting +page, click Create from form and then fill out the necessary information as shown +below:

+

Deploying a test NGINX container named tns

+

You should immediately be directed to a page that lists your new deployment as shown +below:

+

The running NGINX container

+

Go back to the terminal window and issue the command:

+
microk8s kubectl get svc tns -n kube-system
+
+NAME   TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE
+tns    LoadBalancer   10.152.183.90   <pending>     8080:30012/TCP   14m
+
+

Go to a browser and visit http://<Floating-IP>:<NodePort>, i.e. http://128.31.26.4:30012/, to check the nginx default page.

+

Deploy A Sample Nginx Application

+
    +
  • +

    Create an alias:

    +
    alias mkctl="microk8s kubectl"
    +
    +
  • +
  • +

    Create a deployment, in this case Nginx:

    +
    mkctl create deployment --image nginx my-nginx
    +
    +
  • +
  • +

    To access the deployment we will need to expose it:

    +
    mkctl expose deployment my-nginx --port=80 --type=NodePort
    +
    +
    mkctl get svc my-nginx
    +
    +NAME       TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE
    +my-nginx   NodePort   10.152.183.41   <none>        80:31225/TCP   35h
    +
    +
  • +
+

Go to a browser and visit http://<Floating-IP>:<NodePort>, i.e. http://128.31.26.4:31225/, to check the nginx default page.

+

Deploy Another Application

+

You can start by creating a microbot deployment with two pods via the kubectl cli:

+
mkctl create deployment microbot --image=dontrebootme/microbot:v1
+mkctl scale deployment microbot --replicas=2
+
+

To expose the deployment to NodePort, you need to create a service:

+
mkctl expose deployment microbot --type=NodePort --port=80 --name=microbot-service
+
+

View the port information:

+
mkctl get svc microbot-service
+
+NAME               TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE
+microbot-service   NodePort   10.152.183.8   <none>        80:31442/TCP   35h
+
+

Go to a browser and visit http://<Floating-IP>:<NodePort>, i.e. http://128.31.26.4:31442/, to check the microbot default page.

+

Microk8s Microbot App

+
+ + + + + + + + + + + + + +
+
+ + + +
+ +
+ + + +
+
+
+
+ + + + + + + + + + + + \ No newline at end of file diff --git a/other-tools/kubernetes/minikube/index.html b/other-tools/kubernetes/minikube/index.html new file mode 100644 index 00000000..f0aafd78 --- /dev/null +++ b/other-tools/kubernetes/minikube/index.html @@ -0,0 +1,5104 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + +New England Research Cloud(NERC) + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + + +
+ + + + + + + + +
+ + +
+ +
+ + + + + + +
+
+ + + +
+
+
+ + + + + +
+
+
+ + + + + + + +
+
+ + + + + + + +

Minikube

+

Minimum system requirements for minikube

+
    +
  • 2 GB RAM or more
  • +
  • 2 CPU / vCPUs or more
  • +
  • 20 GB free hard disk space or more
  • +
  • A container or virtual machine manager, such as Docker, Hyperkit, Hyper-V, KVM, Parallels, Podman, VirtualBox, or VMware.
  • +
+

Pre-requisite

+

We will need 1 VM to create a single node Kubernetes cluster using Minikube. We are using the following settings for this purpose:

+
    +
  • +

    1 Linux machine for master, ubuntu-22.04-x86_64 or your choice of Ubuntu OS + image, cpu-su.2 flavor with 2vCPU, 8GB RAM, 20GB storage - also + assign Floating IP + to this VM.

    +
  • +
  • +

    Set up a unique hostname on the machine using the following commands:

    +
    echo "<node_internal_IP> <host_name>" >> /etc/hosts
    +hostnamectl set-hostname <host_name>
    +
    +

    For example:

    +
    echo "192.168.0.62 minikube" >> /etc/hosts
    +hostnamectl set-hostname minikube
    +
    +
  • +
+

Install Minikube on Ubuntu

+

Run the below command on the Ubuntu VM:

+
+

Very Important

+

Run the following steps as a non-root user, i.e. ubuntu.

+
+
    +
  • +

    SSH into minikube machine

    +
  • +
  • +

    Update the repositories and packages:

    +
    sudo apt-get update && sudo apt-get upgrade -y
    +
    +
  • +
  • +

    Install curl, wget, and apt-transport-https

    +
    sudo apt-get update && sudo apt-get install -y curl wget apt-transport-https
    +
    +
  • +
+
+

Download and install the latest version of Docker CE

+
    +
  • +

    Download and install Docker CE:

    +
    curl -fsSL https://get.docker.com -o get-docker.sh
    +sudo sh get-docker.sh
    +
    +
  • +
  • +

    Configure the Docker daemon:

    +
    sudo usermod -aG docker $USER && newgrp docker
    +
    +
  • +
+
+

Install kubectl

+
    +
  • +

    Install kubectl binary

    +

    kubectl: the command line util to talk to your cluster.

    +
    sudo snap install kubectl --classic
    +
    +

    This outputs:

    +
    kubectl 1.26.1 from Canonical✓ installed
    +
    +
  • +
  • +

    Now verify the kubectl version:

    +
    sudo kubectl version -o yaml
    +
    +
  • +
+
+

Install the container runtime i.e. containerd on master and worker nodes

+

To run containers in Pods, Kubernetes uses a container runtime.

+

By default, Kubernetes uses the Container Runtime Interface (CRI) to interface +with your chosen container runtime.

+
    +
  • +

    Install container runtime - containerd

    +

    The first thing to do is configure the persistent loading of the necessary containerd modules. Forwarding IPv4 and letting iptables see bridged traffic is enabled with the following commands:

    +
    cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
    +overlay
    +br_netfilter
    +EOF
    +
    +sudo modprobe overlay
    +sudo modprobe br_netfilter
    +
    +
  • +
  • +

    Ensure net.bridge.bridge-nf-call-iptables is set to 1 in your sysctl config:

    +
    # sysctl params required by setup, params persist across reboots
    +cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
    +net.bridge.bridge-nf-call-iptables  = 1
    +net.bridge.bridge-nf-call-ip6tables = 1
    +net.ipv4.ip_forward                 = 1
    +EOF
    +
    +
  • +
  • +

    Apply sysctl params without reboot:

    +
    sudo sysctl --system
    +
    +
  • +
  • +

    Install the necessary dependencies with:

    +
    sudo apt install -y curl gnupg2 software-properties-common apt-transport-https ca-certificates
    +
    +
  • +
  • +

    The containerd.io packages in DEB and RPM formats are distributed by Docker. + Add the required GPG key with:

    +
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
    +sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
    +
    +

    It's now time to Install and configure containerd:

    +
    sudo apt update -y
    +sudo apt install -y containerd.io
    +containerd config default | sudo tee /etc/containerd/config.toml
    +
    +# Reload the systemd daemon with
    +sudo systemctl daemon-reload
    +
    +# Start containerd
    +sudo systemctl restart containerd
    +sudo systemctl enable --now containerd
    +
    +

    You can verify containerd is running with the command:

    +
    sudo systemctl status containerd
    +
    +
  • +
+
+

Installing minikube

+
    +
  • +

    Install minikube

    +
    curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube_latest_amd64.deb
    +sudo dpkg -i minikube_latest_amd64.deb
    +
    +

    OR, install minikube using wget:

    +
    wget https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64
    +cp minikube-linux-amd64 /usr/bin/minikube
    +chmod +x /usr/bin/minikube
    +
    +
  • +
  • +

    Verify the Minikube installation:

    +
    minikube version
    +
    +minikube version: v1.29.0
    +commit: ddac20b4b34a9c8c857fc602203b6ba2679794d3
    +
    +
  • +
  • +

    Install conntrack:

    +

    Kubernetes 1.26.1 requires conntrack to be installed in root's path:

    +
    sudo apt-get install -y conntrack
    +
    +
  • +
  • +

    Start minikube:

    +

    As we already stated at the beginning, we will be using Docker as the base for Minikube, so start minikube with the docker driver:

    +
    minikube start --driver=docker --container-runtime=containerd
    +
    +
    +

    Note

    +
      +
    • +

      To check the internal IP, run the minikube ip command.

      +
    • +
    • +

      By default, Minikube uses the driver most relevant to the host OS. To use a different driver, set the --driver flag in minikube start. For example, to use others or none instead of Docker, run minikube start --driver=none. To persist this configuration so that you can run minikube start without explicitly passing the --vm-driver docker flag each time (i.e. set it in the global scope), run: minikube config set vm-driver docker.

      +
    • +
    • +

      Other start options: +minikube start --force --driver=docker --network-plugin=cni --container-runtime=containerd

      +
    • +
    • +

      In case you want to start minikube with customized resources and want the installer to automatically select the driver, then you can run the following command: minikube start --addons=ingress --cpus=2 --cni=flannel --install-addons=true --kubernetes-version=stable --memory=6g

      +
    • +
    +

    The output would look like below:

    +

    Minikube sucessfully started

    +

    Perfect, the above confirms that the minikube cluster has been configured and started successfully.

    +
    +
  • +
  • +

    Run below minikube command to check status:

    +
    minikube status
    +
    +minikube
    +type: Control Plane
    +host: Running
    +kubelet: Running
    +apiserver: Running
    +kubeconfig: Configured
    +
    +
  • +
  • +

    Run the following kubectl commands to verify the cluster info and node status:

    +
    kubectl cluster-info
    +
    +Kubernetes control plane is running at https://192.168.0.62:8443
    +CoreDNS is running at https://192.168.0.62:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
    +
    +To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
    +
    +
    kubectl get nodes
    +
    +NAME       STATUS   ROLES                  AGE   VERSION
    +minikube   Ready    control-plane,master   5m    v1.26.1
    +
    +
  • +
  • +

    To see the kubectl configuration use the command:

    +
    kubectl config view
    +
    +

    The output looks like:

    +

    Minikube config view

    +
  • +
  • +

    Get minikube addon details:

    +
    minikube addons list
    +
    +

    The output will display like below: +Minikube addons list

    +

    If you wish to enable any addons run the below minikube command,

    +
    minikube addons enable <addon-name>
    +
    +
  • +
  • +

    Enable minikube dashboard addon:

    +
    minikube dashboard
    +
    +🔌  Enabling dashboard ...
    +     Using image kubernetesui/metrics-scraper:v1.0.7
    +     Using image kubernetesui/dashboard:v2.3.1
    +🤔  Verifying dashboard health ...
    +🚀  Launching proxy ...
    +🤔  Verifying proxy health ...
    +http://127.0.0.1:40783/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/
    +
    +
  • +
  • +

    To view minikube dashboard url:

    +
    minikube dashboard --url
    +
    +🤔  Verifying dashboard health ...
    +🚀  Launching proxy ...
    +🤔  Verifying proxy health ...
    +http://127.0.0.1:42669/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/
    +
    +
  • +
  • +

    Expose Dashboard on NodePort instead of ClusterIP:

    +

    -- Check the current port for kubernetes-dashboard:

    +
    kubectl get services -n kubernetes-dashboard
    +
    +

    The output looks like below:

    +

    Current ClusterIP for Minikube Dashboard

    +
    kubectl edit service kubernetes-dashboard -n kubernetes-dashboard
    +
    +

    -- Replace type: "ClusterIP" with "NodePort":

    +

    Current Dashboard Type

    +

    -- After saving the file: +Test again: kubectl get services -n kubernetes-dashboard

    +

    Now the output should look like below:

    +

    Current NodePort for Minikube Dashboard

    +

    So now you can browse to the K8s Dashboard: visit http://<Floating-IP>:<NodePort>, i.e. http://140.247.152.235:31881, to view the Dashboard (a non-interactive way to switch the Service type is sketched after this list).

    +
  • +
+
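As referenced above, instead of interactively editing the Service with kubectl edit, the type can also be switched non-interactively. A hedged sketch using kubectl patch:

    kubectl -n kubernetes-dashboard patch service kubernetes-dashboard \
        -p '{"spec": {"type": "NodePort"}}'
    kubectl get services -n kubernetes-dashboard    # confirm the newly assigned NodePort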

Deploy A Sample Nginx Application

+
    +
  • +

    Create a deployment, in this case Nginx:

    +

    A Kubernetes Pod is a group of one or more Containers, tied together for the +purposes of administration and networking. The Pod in this tutorial has only +one Container. A Kubernetes Deployment checks on the health of your Pod and +restarts the Pod's Container if it terminates. Deployments are the recommended +way to manage the creation and scaling of Pods.

    +
  • +
  • +

    Let's check if the Kubernetes cluster is up and running:

    +
    kubectl get all --all-namespaces
    +kubectl get po -A
    +kubectl get nodes
    +
    +
    kubectl create deployment --image nginx my-nginx
    +
    +
  • +
  • +

    To access the deployment we will need to expose it:

    +
    kubectl expose deployment my-nginx --port=80 --type=NodePort
    +
    +

    To check which NodePort is opened and serving the Nginx app, run:

    +
    kubectl get svc
    +
    +

    The output will show:

    +

    Minikube Running Services

    +

    OR,

    +
    minikube service list
    +
    +|----------------------|---------------------------|--------------|-------------|
    +|      NAMESPACE       |           NAME            | TARGET PORT  |       URL   |
    +|----------------------|---------------------------|--------------|-------------|
    +| default              | kubernetes                | No node port |
    +| default              | my-nginx                  |           80 | http:.:31081|
    +| kube-system          | kube-dns                  | No node port |
    +| kubernetes-dashboard | dashboard-metrics-scraper | No node port |
    +| kubernetes-dashboard | kubernetes-dashboard      |           80 | http:.:31929|
    +|----------------------|---------------------------|--------------|-------------|
    +
    +

    OR,

    +
    kubectl get svc my-nginx
    +minikube service my-nginx --url
    +
    +

    Once the deployment is up, you should be able to access the Nginx home page on +the allocated NodePort from the node's Floating IP.

    +

    Go to a browser and visit http://<Floating-IP>:<NodePort>, i.e. http://140.247.152.235:31081/, to check the nginx default page.

    +

    For our example:

    +

    nginx default page

    +
  • +
+
+

Deploy A Hello Minikube Application

+
    +
  • +

    Use the kubectl create command to create a Deployment that manages a Pod. The + Pod runs a Container based on the provided Docker image.

    +
    kubectl create deployment hello-minikube --image=k8s.gcr.io/echoserver:1.4
    +kubectl expose deployment hello-minikube --type=NodePort --port=8080
    +
    +
  • +
  • +

    View the port information:

    +
    kubectl get svc hello-minikube
    +minikube service hello-minikube --url
    +
    +

    Go to a browser and visit http://<Floating-IP>:<NodePort>, i.e. http://140.247.152.235:31293/, to check the hello minikube default page.

    +

    For our example:

    +

    Hello Minikube default page

    +
  • +
+

Clean up

+

Now you can clean up the app resources you created in your cluster:

kubectl delete service my-nginx
kubectl delete deployment my-nginx

kubectl delete service hello-minikube
kubectl delete deployment hello-minikube
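To confirm that the resources are gone, you can list what remains in the default
namespace (an optional quick check):

kubectl get deployments,services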

Managing Minikube Cluster

    To stop Minikube, run:

    minikube stop

    To delete the single node cluster:

    minikube delete

    To start Minikube, run:

    minikube start

    Remove the Minikube configuration and data directories:

    rm -rf ~/.minikube
    rm -rf ~/.kube

    If you have installed any Minikube related packages, remove them:

    sudo apt remove -y conntrack
    If you want to start Minikube with more resources, such as 8 GB of RAM and
    4 CPUs, execute the following commands one after another:

    minikube config set cpus 4
    minikube config set memory 8192
    minikube delete
    minikube start
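    You can verify the saved configuration values before restarting (optional):

    minikube config view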
NERC Technical Documentation

NERC welcomes your contributions

These pages are hosted from a git repository and contributions are welcome!

Fork this repo

"},{"location":"about/","title":"About NERC","text":"

We are currently in the pilot phase of the project and are focusing on developing the technology to make it easy for researchers to take advantage of a suite of services (IaaS, PaaS, SaaS) that are not readily available today. This includes:

  1. The creation of the building blocks needed for production cloud services

  2. Begin collaboration with Systems Engineers from other institutions with well established RC groups

  3. On-board select proof of concept use cases from institutions within the MGHPCC consortium and other institutions within Massachusetts

The longer term objectives will be centered around activities that will focus on:

  1. Engaging with various OpenStack communities by sharing best practices and setting standards for deployments

  2. Connecting regularly with the Mass Open Cloud (MOC) leadership to understand when new technologies they are developing with RedHat, Inc. \u2013 and as part of the new NSF funded Open Cloud Testbed \u2013 might be ready for adoption into the production NERC environment

  3. Broadening the local deployment team of NERC to include partner universities within the MGHPCC consortium.

Figure 1: NERC Overview

NERC production services (red) stand on top of the existing NESE storage services (blue) that are built on the strong foundation of MGHPCC (green) that provides core facility and network access. The Innovation Hub (grey) enables new technologies to be rapidly adopted by the NERC or NESE services. On the far left (purple) are the Research and Learning communities which are the primary customers of NERC. As users proceed down the stack of production services from Web-apps, that require more technical skills, the Cloud Facilitators (orange) in the middle guide and educate users on how to best use the services.

For more information, view NERC's concept document.

"},{"location":"get-started/create-a-user-portal-account/","title":"User Account Types","text":"

NERC offers two types of user accounts: a Principal Investigator (PI) Account and a General User Account. All General Users must be assigned to their project by an active NERC PI or by one of the delegated project manager(s), as described here. Then, those project users can be added to the resource allocation during a new allocation request or at a later time.

Principal Investigator Eligibility Information

  • MGHPCC consortium members, whereby they enter into a service agreement with MGHPCC for the NERC services.

  • Non-members of MGHPCC can also be PIs of NERC Services, but must also have an active non-member agreement with MGHPCC.

  • External research focused institutions will be considered on a case-by-case basis and are subject to an external customer cost structure.

A PI account can request allocations of NERC resources, grant access to other general users enabling them to log into NERC's computational project space, and delegate its responsibilities to other collaborators from the same institutions or elsewhere as managers using NERC's ColdFront interface, as described here.

"},{"location":"get-started/create-a-user-portal-account/#getting-started","title":"Getting Started","text":"

Any faculty, staff, student, and external collaborator must request a user account through the MGHPCC Shared Services (MGHPCC-SS) Account Portal, also known as \"RegApp\". This is a web-based, single point-of-entry to the NERC system that displays a user welcome page. The welcome page of the account registration site displays instructions on how to register a General User account on NERC, as shown in the image below:

There are two options: either register for a new account or manage an existing one. If you are new to NERC and want to register as a new MGHPCC-SS user, click on the \"Register for an Account\" button. This will redirect you to a new web page which shows details about how to register for a new MGHPCC-SS user account. NERC uses CILogon, which supports login using your institutional identity provider (IdP).

Clicking the \"Begin MGHPCC-SS Account Creation Process\" button will initiate the account creation process. You will be redirected to a site managed by CILogon where you will select your institutional or commercial identity provider, as shown below:

Once selected, you will be redirected to your institutional or commercial identity provider, where you will log in, as shown here:

After a successful log on, your browser will be redirected back to the MGHPCC-SS Registration Page and ask for a review and confirmation of creating your account with fetched information to complete the account creation process.

Very Important

If you don't click the \"Create MGHPCC-SS Account\" button, your account will not be created! So, this is a very important step. Review your information carefully and then click on the \"Create MGHPCC-SS Account\" button to save your information. Please review the information, make any corrections that you need and fill in any blank/ missing fields such as \"Research Domain\". Please read the End User Level Agreement (EULA) and accept the terms by checking the checkbox in this form.

Once you have reviewed and verified that all your user information in this form is correct, only then click the \"Create MGHPCC-SS Account\" button. This will automatically send an email to your email address with a link to validate and confirm your account information.

Once you receive an \"MGHPCC-SS Account Creation Validation\" email, review your user account information to ensure it is correct. Then, click on the provided validation web link and enter the unique account creation Confirmation Code provided in the email as shown below:

Once validated, you need to ensure that your user account is created and valid by viewing the following page:

Important Note

If you have an institutional identity, it's preferable to use that identity to create your MGHPCC-SS account. Institutional identities are vetted by identity management teams and provide a higher level of confidence to resource owners when granting access to resources. You can only link one university account to an MGHPCC-SS account; if you have multiple university accounts, you will only be able to link one of those accounts to your MGHPCC-SS account. If, at a later date, you want to change which account is connected to your MGHPCC-SS identity, you can do so by contacting help@mghpcc.org.

"},{"location":"get-started/create-a-user-portal-account/#how-to-update-and-modify-your-mghpcc-ss-account-information","title":"How to update and modify your MGHPCC-SS account information?","text":"
  1. Log in to the RegApp using your MGHPCC-SS account.

  2. Click on \"Manage Your MGHPCC-SS Account\" button as shown below:

  3. Review your currently saved account information, make any necessary corrections or updates to fields, and then click on the \"Update MGHPCC-SS Account\" button.

  4. This will send an email to verify your updated account information, so please check your email address.

  5. Confirm and validate the new account details by clicking the provided validation web link and entering the unique Confirmation Code provided in the email as shown below:

"},{"location":"get-started/create-a-user-portal-account/#how-to-request-a-principal-investigator-pi-account","title":"How to request a Principal Investigator (PI) Account?","text":"

The process for requesting and obtaining a PI Account is relatively simple. You can fill out this NERC Principal Investigator (PI) Account Request form to initiate the process.

Alternatively, users can request a Principal Investigator (PI) user account by submitting a new ticket at the NERC's Support Ticketing System under the \"NERC PI Account Request\" option in the Help Topic dropdown menu, as shown in the image below:

Information

Once your PI user request is reviewed and approved by the NERC's admin, you will receive an email confirmation from NERC's support system, i.e., help@nerc.mghpcc.org. Then, you can access NERC's ColdFront resource allocation management portal using the PI user role, as described here.

"},{"location":"get-started/user-onboarding-on-NERC/","title":"User Onboarding Process Overview","text":"

NERC's Research allocations are available to faculty members and researchers, including postdoctoral researchers and students. In order to get access to resources provided by NERC's computational infrastructure, you must first register and obtain a user account.

The overall user flow can be summarized using the following sequence diagram:

  1. All users, including PIs, need to register with NERC via: https://regapp.mss.mghpcc.org/.

  2. The PI then requests a Principal Investigator (PI) user account role by submitting: NERC's PI Request Form.

    Alternatively, users can request a Principal Investigator (PI) user account by submitting a new ticket at the NERC's Support Ticketing System under the \"NERC PI Account Request\" option in the Help Topic dropdown menu, as shown in the image below:

    Principal Investigator Eligibility Information

    • MGHPCC consortium members, whereby they enter into a service agreement with MGHPCC for the NERC services.

    • Non-members of MGHPCC can also be PIs of NERC Services, but must also have an active non-member agreement with MGHPCC.

    • External research focused institutions will be considered on a case-by-case basis and are subject to an external customer cost structure.

  3. Wait until the PI request gets approved by the NERC's admin.

  4. Once a PI request is approved, the PI can add a new project and also search for and add user(s) to the project. Other general user(s) can see the project(s) once they are added to a project via: https://coldfront.mss.mghpcc.org.

  5. The PI or project manager can request a resource allocation for either NERC (OpenStack) or NERC-OCP (OpenShift) for the newly added project and select which user(s) can use the requested allocation.

    As a new NERC PI for the first time, am I entitled to any credits?

    As a new PI using NERC for the first time, you might wonder if you get any credits. Yes, you'll receive up to $1000 for the first month only. But remember, this credit cannot be used in the following months. Also, it does not apply to GPU resource usage.

  6. Wait until the requested resource allocation gets approved by the NERC's admin.

  7. Once approved, PI and the corresponding project users can go to either NERC Openstack horizon web interface: https://stack.nerc.mghpcc.org or NERC OpenShift web console: https://console.apps.shift.nerc.mghpcc.org based on approved Resource Type and they can start using the NERC's resources based on the approved project quotas.

"},{"location":"get-started/allocation/adding-a-new-allocation/","title":"Adding a new Resource Allocation to the project","text":"

If one resource allocation is not sufficient for a project, PI or project managers may request additional allocations by clicking on the \"Request Resource Allocation\" button on the Allocations section of the project details. This will show the page where all existing users for the project will be listed on the bottom of the request form. PIs can select desired user(s) to make the requested resource allocations available on their NERC's OpenStack or OpenShift projects.

Here, you can view the Resource Type, information about your Allocated Project, status, End Date of the allocation, and actions button or any pending actions as shown below:

"},{"location":"get-started/allocation/adding-a-new-allocation/#adding-a-new-resource-allocation-to-your-openstack-project","title":"Adding a new Resource Allocation to your OpenStack project","text":"

Important: Requested/Approved Allocated OpenStack Storage Quota & Cost

Ensure you choose NERC (OpenStack) in the Resource option and specify your anticipated computing units. Each allocation, whether requested or approved, will be billed based on the pay-as-you-go model. The exception is for Storage quotas, where the cost is determined by your requested and approved allocation values to reserve storage from the total NESE storage pool. For NERC (OpenStack) Resource Allocations, the Storage quotas are specified by the \"OpenStack Volume Quota (GiB)\" and \"OpenStack Swift Quota (GiB)\" allocation attributes. If you have common questions or need more information, refer to our Billing FAQs for comprehensive answers. Keep in mind that you can easily scale and expand your current resource allocations within your project by following this documentation later on.

"},{"location":"get-started/allocation/adding-a-new-allocation/#adding-a-new-resource-allocation-to-your-openshift-project","title":"Adding a new Resource Allocation to your OpenShift project","text":"

Important: Requested/Approved Allocated OpenShift Storage Quota & Cost

Ensure you choose NERC-OCP (OpenShift) in the Resource option (Always Remember: the first option, i.e. NERC (OpenStack) is selected by default!) and specify your anticipated computing units. Each allocation, whether requested or approved, will be billed based on the pay-as-you-go model. The exception is for Storage quotas, where the cost is determined by your requested and approved allocation values to reserve storage from the total NESE storage pool. For NERC-OCP (OpenShift) Resource Allocations, storage quotas are specified by the \"OpenShift Request on Storage Quota (GiB)\" and \"OpenShift Limit on Ephemeral Storage Quota (GiB)\" allocation attributes. If you have common questions or need more information, refer to our Billing FAQs for comprehensive answers. Keep in mind that you can easily scale and expand your current resource allocations within your project by following this documentation later on.

"},{"location":"get-started/allocation/adding-a-project/","title":"A New Project Creation Process","text":""},{"location":"get-started/allocation/adding-a-project/#what-pis-need-to-fill-in-order-to-request-a-project","title":"What PIs need to fill in order to request a Project?","text":"

Once logged in to NERC's ColdFront, PIs can choose Projects sub-menu located under the Project menu.

Clicking on the \"Add a project\" button will show the interface below:

Very Important: Project Title Length Limitation

Please ensure that the project title is both concise and does not exceed a length of 63 characters.

PIs need to specify an appropriate title (less than 63 characters), description of their research work that will be performed on the NERC (in one or two paragraphs), the field(s) of science or research domain(s), and then click the \"Save\" button. Once saved successfully, PIs effectively become the \"manager\" of the project, and are free to add or remove users and also request resource allocation(s) to any Projects for which they are the PI. PIs are permitted to add users to their group, request new allocations, renew expiring allocations, and provide information such as publications and grant data. PIs can maintain all their research information under one project or, if they require, they can separate the work into multiple projects.

"},{"location":"get-started/allocation/allocation-change-request/","title":"Request change to Resource Allocation to an existing project","text":"

If the past resource allocation is not sufficient for an existing project, PIs or project managers can request a change by clicking the \"Request Change\" button on the project resource allocation detail page, as shown below:

"},{"location":"get-started/allocation/allocation-change-request/#request-change-resource-allocation-attributes-for-openstack-project","title":"Request Change Resource Allocation Attributes for OpenStack Project","text":"

This will bring up the detailed Quota attributes for that project as shown below:

Important: Requested/Approved Allocated OpenStack Storage Quota & Cost

For NERC (OpenStack) resource types, the Storage quotas are controlled by the values of the \"OpenStack Volume Quota (GiB)\" and \"OpenStack Swift Quota (GiB)\" quota attributes. The Storage cost is determined by your requested and approved allocation values for these quota attributes. If you have common questions or need more information, refer to our Billing FAQs for comprehensive answers.

PIs or project managers can provide a new value for the individual quota attributes and give a justification for the requested changes, so that the NERC admin can review the change request and approve or deny it based on the justification and the quota change request. Submitting the change request will notify the NERC admin about it. Please wait until the NERC admin approves or denies the change request to see the change on your resource allocation for the selected project.

Important Information

PIs or project managers should put new values in the textboxes ONLY for the quota attributes they want to change; the others can be left blank so that those quotas will not get changed!

To use GPU resources on your VM, you need to specify the number of GPUs in the \"OpenStack GPU Quota\" attribute. Additionally, ensure that your other quota attributes, namely \"OpenStack Compute vCPU Quota\" and \"OpenStack Compute RAM Quota (MiB)\" have sufficient resources to meet the vCPU and RAM requirements for one of the GPU tier-based flavors. Refer to the GPU Tier documentation for specific requirements and further details on the flavors available for GPU usage.

"},{"location":"get-started/allocation/allocation-change-request/#allocation-change-requests-for-openstack-project","title":"Allocation Change Requests for OpenStack Project","text":"

Once the request is processed by the NERC admin, any user can view the change request trail for the project by looking at the \"Allocation Change Requests\" section, which looks like the one below:

Any user can click on the Action button to view the details about the change request. This will show more details about the change request, as shown below:

"},{"location":"get-started/allocation/allocation-change-request/#how-to-use-gpu-resources-in-your-openstack-project","title":"How to Use GPU Resources in your OpenStack Project","text":"

Comparison Between CPU and GPU

To learn more about the key differences between CPUs and GPUs, please read this.

A GPU instance is launched in the same way as any other compute instance, with a few considerations to keep in mind:

  1. When launching a GPU-based instance, be sure to select one of the GPU Tier based flavors.

  2. You need to have sufficient resource quota to launch the desired flavor. Always ensure you know which GPU-based flavor you want to use, then submit an allocation change request to adjust your current allocation to fit the flavor's resource requirements.

    Resource Required for Launching a VM with \"NVIDIA A100 SXM4 40GB\" Flavor.

    Based on the GPU Tier documentation, NERC provides two variations of NVIDIA A100 SXM4 40GB flavors:

    1. gpu-su-a100sxm4.1: Includes 1 NVIDIA A100 GPU
    2. gpu-su-a100sxm4.2: Includes 2 NVIDIA A100 GPUs

    You should select the flavor that best fits your resource needs and ensure your OpenStack quotas are appropriately configured for the chosen flavor. To use a GPU-based VM flavor, choose the one that best fits your resource needs and make sure your OpenStack quotas meet the required specifications:

    • For the gpu-su-a100sxm4.1 flavor:

      • vCPU: 32
      • RAM (GiB): 240
    • For the gpu-su-a100sxm4.2 flavor:

      • vCPU: 64
      • RAM (GiB): 480

    Ensure that your OpenStack resource quotas are configured as follows:

    • OpenStack GPU Quota: Meets or exceeds the number of GPUs required by the chosen flavor.
    • OpenStack Compute vCPU Quota: Meets or exceeds the vCPU requirement.
    • OpenStack Compute RAM Quota (MiB): Meets or exceeds the RAM requirement.

    Properly configure these quotas to successfully launch a VM with the selected \"gpu-su-a100sxm4\" flavor.

  3. We recommend using ubuntu-22.04-x86_64 as the image for your GPU-based instance because we have tested the NVIDIA driver with this image and obtained good results. That said, it is possible to run a variety of other images as well.
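Putting this together, launching such an instance from the OpenStack CLI might look roughly like the sketch below. This is only an illustration: the flavor and image names come from the guidance above, while the network, key pair, and security group are placeholders you would replace with your own, and the same launch can of course be performed from the Horizon dashboard instead:

openstack server create \
  --flavor gpu-su-a100sxm4.1 \
  --image ubuntu-22.04-x86_64 \
  --network <your-network> \
  --key-name <your-keypair> \
  --security-group <your-security-group> \
  my-gpu-instance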

"},{"location":"get-started/allocation/allocation-change-request/#request-change-resource-allocation-attributes-for-openshift-project","title":"Request Change Resource Allocation Attributes for OpenShift Project","text":"

Important: Requested/Approved Allocated OpenShift Storage Quota & Cost

For NERC-OCP (OpenShift) resource types, the Storage quotas are controlled by the values of the \"OpenShift Request on Storage Quota (GiB)\" and \"OpenShift Limit on Ephemeral Storage Quota (GiB)\" quota attributes. The Storage cost is determined by your requested and approved allocation values for these quota attributes.

PIs or project managers can provide a new value for the individual quota attributes and give a justification for the requested changes, so that the NERC admin can review the change request and approve or deny it based on the justification and the quota change request. Submitting the change request will notify the NERC admin about it. Please wait until the NERC admin approves or denies the change request to see the change on your resource allocation for the selected project.

Important Information

PIs or project managers should put new values in the textboxes ONLY for the quota attributes they want to change; the others can be left blank so that those quotas will not get changed!

In order to use GPU resources on your pod, you must specify the number of GPUs you want to use in the \"OpenShift Request on GPU Quota\" attribute.

"},{"location":"get-started/allocation/allocation-change-request/#allocation-change-requests-for-openshift-project","title":"Allocation Change Requests for OpenShift Project","text":"

Once the request is processed by the NERC admin, any user can view the change request trail for the project by looking at the \"Allocation Change Requests\" section, which looks like the one below:

Any user can click on the Action button to view the details about the change request. This will show more details about the change request, as shown below:

"},{"location":"get-started/allocation/allocation-change-request/#how-to-use-gpu-resources-in-your-openshift-project","title":"How to Use GPU Resources in your OpenShift Project","text":"

Comparison Between CPU and GPU

To learn more about the key differences between CPUs and GPUs, please read this.

For OpenShift pods, we can specify different types of GPUs. Since OpenShift is not based on flavors, we can customize the resources as needed at the pod level while still utilizing GPU resources.

You can read about how to specify a pod to use a GPU here.

Also, you will be able to select a different GPU device for your workload, as explained here.
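As a general illustration (not NERC-specific guidance), a pod that requests a single GPU typically declares it under resources.limits. The pod name and container image below are placeholders, and the documentation linked above remains the authoritative reference for GPU device selection on NERC:

cat <<'EOF' | oc apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: gpu-smoke-test                             # placeholder name
spec:
  restartPolicy: Never
  containers:
    - name: cuda
      image: nvidia/cuda:12.2.0-base-ubuntu22.04   # example CUDA base image
      command: ["nvidia-smi"]                      # print visible GPUs and exit
      resources:
        limits:
          nvidia.com/gpu: 1                        # request one GPU
EOF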

"},{"location":"get-started/allocation/allocation-details/","title":"Allocation details","text":"

Access to ColdFront's allocations details is based on user roles. PIs and managers see the same allocation details as users, and can also add project users to the allocation, if they're not already on it, and remove users from an allocation.

"},{"location":"get-started/allocation/allocation-details/#how-to-view-resource-allocation-details-in-the-project","title":"How to View Resource Allocation Details in the Project","text":"

A single project can have multiple allocations. To view details about a specific resource allocation, click on any of the available allocations in the Allocations section of the project details. Here, you can view the Resource Type, information about your Allocated Project, status, End Date of the allocation, and Actions button or any pending actions as shown below:

Clicking the Action icon (shown as a folder icon on the right side of each allocation, as seen in the image above) for the corresponding allocation will open a page displaying detailed information about that allocation. You can access either the PI and Manager View or General User View of the allocation detail page for OpenStack or OpenShift Resource Allocation, depending on your role in the project.

"},{"location":"get-started/allocation/allocation-details/#how-to-find-id-of-the-resource-allocation","title":"How to find ID of the Resource Allocation","text":"

After clicking the Action button for the corresponding allocation, you will be redirected to a new allocation detail page. The web browser will display the URL in the following format:

https://coldfront.mss.mghpcc.org/allocation/<Allocation_ID>/

To find the ID of the resource allocation, observe the URL and note the <Allocation_ID> part. For example, in the URL https://coldfront.mss.mghpcc.org/allocation/1/, the resource Allocation ID is 1.

"},{"location":"get-started/allocation/allocation-details/#pi-and-manager-view","title":"PI and Manager View","text":"

PIs and managers can view important details of the project and underlying allocations. It shows all allocations including start and end dates, creation and last modified dates, users on the allocation and public allocation attributes. PIs and managers can add or remove users from allocations.

"},{"location":"get-started/allocation/allocation-details/#pi-and-manager-allocation-view-of-openstack-resource-allocation","title":"PI and Manager Allocation View of OpenStack Resource Allocation","text":""},{"location":"get-started/allocation/allocation-details/#pi-and-manager-allocation-view-of-openshift-resource-allocation","title":"PI and Manager Allocation View of OpenShift Resource Allocation","text":""},{"location":"get-started/allocation/allocation-details/#general-user-view","title":"General User View","text":"

General Users who are not PIs or Managers on a project see a read-only view of the allocation details. If a user is on a project but not a particular allocation, they will not be able to see the allocation in the Project view nor will they be able to access the Allocation detail page.

"},{"location":"get-started/allocation/allocation-details/#general-user-view-of-openstack-resource-allocation","title":"General User View of OpenStack Resource Allocation","text":""},{"location":"get-started/allocation/allocation-details/#general-user-view-of-openshift-resource-allocation","title":"General User View of OpenShift Resource Allocation","text":""},{"location":"get-started/allocation/archiving-a-project/","title":"Archiving an Existing Project","text":"

Only a PI can archive their ColdFront project(s) by accessing NERC's ColdFront interface.

Important Note

If you archive a project then this will expire all your allocations on that project, which will clean up and also disable your group's access to the resources in those allocations. Also, you cannot make any changes to archived projects.

Once archived it is no longer visible on your projects list. All archived projects will be listed under your archived projects, which can be viewed by clicking the \"View archived projects\" button as shown below:

All your archived projects are displayed here:

"},{"location":"get-started/allocation/coldfront/","title":"What is NERC's ColdFront?","text":"

NERC uses NERC's ColdFront interface, an open source resource allocation management system, to provide a single point of entry for administration, reporting, and measuring the scientific impact of NERC resources for PIs.

Learning ColdFront

A collection of animated gifs showcasing common functions in ColdFront is available, providing helpful insights into how these features can be utilized.

"},{"location":"get-started/allocation/coldfront/#how-to-get-access-to-nercs-coldfront","title":"How to get access to NERC's ColdFront","text":"

Any users who have registered their user accounts through the MGHPCC Shared Services (MGHPCC-SS) Account Portal, also known as \"RegApp\", can get access to NERC's ColdFront interface.

General Users who are not PIs or Managers on a project see a read-only view of NERC's ColdFront, as described here.

Once a PI Account request is granted, the PI will receive an email confirming the request approval and explaining how to connect to NERC's ColdFront.

PIs or project managers can use NERC's ColdFront as a self-service web portal that provides an administrative view, as described here, and can do the following tasks:

  • Only PI can add a new project and archive any existing project(s)

  • Manage existing projects

  • Request allocations that fall under projects in NERC's resources such as clusters, cloud resources, servers, storage, and software licenses

  • Add/remove user access to/from allocated resources who is a member of the project without requiring system administrator interaction

  • Elevate selected users to 'manager' status, allowing them to handle some of the PI's tasks, such as requesting new resource allocations, adding/removing users to/from resource allocations, and adding project data such as grants and publications

  • Monitor resource utilization such as storage and cloud usage

  • Receive email notifications for expiring/renewing access to resources as well as notifications when allocations change status - i.e. Active, Active (Needs Renewal), Denied, Expired

  • Provide information such as grants, publications, and other reportable data for periodic review by center director to demonstrate need for the resources

"},{"location":"get-started/allocation/coldfront/#how-to-login-to-nercs-coldfront","title":"How to login to NERC's ColdFront?","text":"

NERC's ColdFront interface provides users with login page as shown here:

Please click on \"Log In\" button. Then, it will show the login interface as shown below:

You need to click on \"Log in via OpenID Connect\" button. This will redirect you to CILogon welcome page where you can select your appropriate Identity Provider as shown below:

Once successful, you will be redirected to the ColdFront's main dashboard as shown below:

"},{"location":"get-started/allocation/manage-users-to-a-project/","title":"Managing Users in the Project","text":""},{"location":"get-started/allocation/manage-users-to-a-project/#addremove-users-tofrom-a-project","title":"Add/Remove User(s) to/from a Project","text":"

A user can only view projects they are on. PIs or managers can add or remove users from their respective projects by navigating to the Users section of the project.

Once we click on the \"Add Users\" button, it will show us the following search interface:

Searching multiple users at once!

If you want to simultaneously search for multiple users in the system, you can input multiple usernames separated by space or newline, as shown below:

NOTE: This will return a list of all users matching those provided usernames only if they exist.

They can search for any users in the system that are not already part of the project by providing an exactly matching username or partial text of multiple other fields. The search results show details about the user account such as email address, username, first name, last name, etc., as shown below:

Delegating user as 'Manager'

When adding a user to your project you can optionally designate them as a \"Manager\" by selecting their role using the drop down next to their email. Read more about user roles here.

Thus, found user(s) can be selected and assigned directly to the available resource allocation(s) on the given project using this interface. While adding the users, their Role also can be selected from the dropdown options as either User or Manager. Once confirmed with selection of user(s) their roles and allocations, click on the \"Add Selected Users to Project\" button.

Removing Users from the Project is straightforward by just clicking on the \"Remove Users\" button. Then it shows the following interface:

PI or project managers can select the user(s) and then click on the \"Remove Selected Users From Project\" button.

"},{"location":"get-started/allocation/manage-users-to-a-project/#user-roles","title":"User Roles","text":"

Access to ColdFront is role based, so users see a read-only view of the allocation details for any allocations they are on. PIs see the same allocation details as general users and can also add project users to the allocation if they're not already on it. Initially, PIs add any user to the project with the User role. Later, the PI or project managers can delegate users on their project to the 'manager' role. This allows multiple managers on the same project and provides those users with the same access and abilities as the PI. A \"Manager\" is a user who has the same permissions as the PI to add/remove users, request/renew allocations, and add/remove project info such as grants, publications, and research output. Managers may also complete the annual project review.

What can a PI do that a manager can't?

The only tasks a PI can do that a manager can't are creating a new project and archiving existing project(s). All other project-related actions that a PI can perform can also be accomplished by any one of the managers assigned to that project.

General User Accounts are not able to create/update projects and request Resource Allocations. Instead, these accounts must be associated with a Project that has Resources. General User accounts that are associated with a Project have access to view their project details and use all the resources associated with the Project on NERC.

General Users (not PIs or Managers) can turn off email notifications at the project level. PIs also have the 'manager' status on a project. Managers can't turn off their notifications. This ensures they continue to get allocation expiration notification emails.

"},{"location":"get-started/allocation/manage-users-to-a-project/#delegating-user-to-manager-role","title":"Delegating User to Manager Role","text":"

You can also modify the role of existing project users at any time by clicking on the Edit button next to the user's name.

To change a user's role to 'manager' click on the edit icon next to the user's name on the Project Detail page:

Then toggle the \"Role\" from User to Manager:

Very Important

Make sure to click the \"Update\" button to save the change.

This delegation of \"Manager\" role can also be done when adding a user to your project. You can optionally designate them as a \"Manager\" by selecting their role using the drop down next to their email as described here.

"},{"location":"get-started/allocation/manage-users-to-a-project/#notifications","title":"Notifications","text":"

All users on a project will receive notifications about allocations including reminders of upcoming expiration dates and status changes. Users may uncheck the box next to their username to turn off notifications. Managers and PIs on the project are not able to turn off notifications.

"},{"location":"get-started/allocation/managing-users-to-an-allocation/","title":"Adding and removing project Users to project Resource Allocation","text":"

Any available project users who were not previously added to a given resource allocation can be added to it by clicking on the \"Add Users\" button, as shown below:

Once clicked, it will show the following interface, where PIs can select the available user(s) using the checkboxes and click on the \"Add Selected Users to Allocation\" button.

Very Important

The desired user must already be on the project to be added to the allocation.

Removing Users from the Resource Allocation is straightforward by just clicking on the \"Remove Users\" button. Then it shows the following interface:

PI or project managers can select the user(s) on the checkboxes and then click on the \"Remove Selected Users From Project\" button.

"},{"location":"get-started/allocation/project-and-allocation-review/","title":"Project and Individual Allocation Annual Review Process","text":""},{"location":"get-started/allocation/project-and-allocation-review/#project-annual-review-process","title":"Project Annual Review Process","text":"

NERC's ColdFront allows annual project reviews for NERC admins by mandating PIs to assess and update their projects. With the Project Review feature activated, each project undergoes a mandatory review every 365 days. During this process, PIs update project details, confirm project members, and input publications, grants, and research outcomes from the preceding year.

Required Project Review

The PI or any manager(s) of a project must complete the project review once every 365 days. ColdFront does not send notifications to PIs when project reviews are due. Instead, when the PI or Manager(s) of a project views their project, they will find a notification that the project review is due. Additionally, when the project review is pending, PIs or Project Manager(s) cannot request new allocations, renew expiring allocations, or submit change requests to update the allocated allocation attributes' values. This is to enforce the requirement that PIs review their projects annually. The PI or any managers on the project are able to complete the project review process.

"},{"location":"get-started/allocation/project-and-allocation-review/#project-reviews-by-pis-or-project-managers","title":"Project Reviews by PIs or Project Manager(s)","text":"

When a PI or any Project Manager(s) of a project logs into NERC's ColdFront web console and their project review is due, they will see a banner next to the project name on the home page:

If they try to request a new allocation or renew an expiring allocation or change request to update the allocated allocation attributes' values, they will get an error message:

"},{"location":"get-started/allocation/project-and-allocation-review/#project-review-steps","title":"Project Review Steps","text":"

When they click on the \"Review Project\" link they're presented with the requirements and a description of why we're asking for this update:

The links in each step direct them to different parts of their Project Detail page. This review page lists the dates when grants and publications were last updated. If there are no grants or publications, or at least one of them hasn't been updated in the last year, we ask for a reason they're not updating the project information. This helps encourage PIs to provide updates if they have them. If not, they provide a reason and this is displayed for the NERC admins as part of the review process.

Once the project review page is completed, the PI is redirected to the project detail page and they see the status change to \"project review pending\".

"},{"location":"get-started/allocation/project-and-allocation-review/#allocation-renewals","title":"Allocation Renewals","text":"

When the requested allocation is approved, it must have an \"End Date\", which is normally 365 days or 1 year from the date it is approved, i.e. the \"Start Date\". Automated emails are sent to all users on an allocation when the end date is 60 days, 30 days, and 7 days away, unless the user turns off notifications on the project; the allocation status is then set to \"Active (Needs Renewal)\".

Very Important: Urgent Allocation Renewal is Required Before End Date

If the allocation renewal isn't processed by the PI or Manager prior to the original allocation end date, the allocation status will be set to \"Active (Needs Renewal)\" and the allocation users will get a notification email letting them know the allocation needs renewal!

Currently, a project will continue to be able to utilize allocations even after the allocation end date, which will result in ongoing costs for you. Such allocations will be marked as \"Active (Needs Renewal)\" as shown below:

Allocation renewals may not require any additions or changes to the allocation attributes from the PI or Manager. By default, if the PI or Manager clicks on the 'Activate' button as shown below:

Then it will prompt for confirmation and allow the admin to review and submit the activation request by clicking on 'Submit' button as shown below:

Emails are sent to all allocation users letting them know the renewal request has been submitted.

Then the allocation status will change to \"Renewal Requested\" as shown below:

Once the renewal request is reviewed and approved by NERC admins, it will change into \"Active\" status and the expiration date is set to another 365 days as shown below:

Then an automated email notification will be sent to the PI and all users on the allocation that have enabled email notifications.

"},{"location":"get-started/allocation/project-and-allocation-review/#cost-associated-with-allocations-that-need-renewal-after-end-date","title":"Cost Associated with Allocations that Need Renewal after \"End Date\"","text":"

Currently, a project will continue to be able to utilize allocations even after their \"End Date\", resulting in ongoing costs for you. Such allocations will be marked as \"Active (Needs Renewal)\". In the future, we plan to change this behavior so that allocations past their end date will prevent associated VMs/pods from starting and may cause active VMs/pods to cease running.

"},{"location":"get-started/allocation/requesting-an-allocation/","title":"How to request a new Resource Allocation","text":"

On the Project Detail page the project PI/manager(s) can request an allocation by clicking the \"Request Resource Allocation\" button as shown below:

On the shown page, you will be able to choose either OpenStack Resource Allocation or OpenShift Resource Allocation by specifying either NERC (OpenStack) or NERC-OCP (OpenShift) in the Resource dropdown option. Note: The first option i.e. NERC (OpenStack), is selected by default.

Default GPU Resource Quota for Initial Allocation Requests

By default, the GPU resource quota is set to 0 for the initial resource allocation request for both OpenStack and OpenShift Resource Types. However, you will be able to submit a change request and adjust the corresponding GPU quotas for both after they are approved for the first time. For NERC's OpenStack, please follow this guide on how to utilize GPU resources in your OpenStack project. For NERC's OpenShift, refer to this reference to learn how to use GPU resources at the pod level.

"},{"location":"get-started/allocation/requesting-an-allocation/#request-a-new-openstack-resource-allocation-for-an-openstack-project","title":"Request A New OpenStack Resource Allocation for an OpenStack Project","text":"

If users have already been added to the project as described here, the Users selection section will be displayed as shown below:

In this section, the project PI/manager(s) can choose user(s) from the project to be included in this allocation before clicking the \"Submit\" button.

Read the End User License Agreement Before Submission

You should read the shown End User License Agreement (the \"Agreement\"). By clicking the \"Submit\" button, you agree to the Terms and Conditions.

Important: Requested/Approved Allocated OpenStack Storage Quota & Cost

Ensure you choose NERC (OpenStack) in the Resource option and specify your anticipated computing units. Each allocation, whether requested or approved, will be billed based on the pay-as-you-go model. The exception is for Storage quotas, where the cost is determined by your requested and approved allocation values to reserve storage from the total NESE storage pool. For NERC (OpenStack) Resource Allocations, the Storage quotas are specified by the \"OpenStack Volume Quota (GiB)\" and \"OpenStack Swift Quota (GiB)\" allocation attributes. If you have common questions or need more information, refer to our Billing FAQs for comprehensive answers. Keep in mind that you can easily scale and expand your current resource allocations within your project by following this documentation later on.

"},{"location":"get-started/allocation/requesting-an-allocation/#resource-allocation-quotas-for-openstack-project","title":"Resource Allocation Quotas for OpenStack Project","text":"

The amount of quota that a resource allocation starts out with after approval can be specified using an integer field in the resource allocation request form, as shown above. The provided unit value is used to compute the individual quotas when PIs or project managers request a resource quota. The basic unit of computational resources is defined as an integer value that corresponds to multiple OpenStack resource quotas. For example, 1 Unit corresponds to:

Quota amount per Unit for each resource:

  • Instances: 1
  • vCPUs: 1
  • GPU: 0
  • RAM (MiB): 4096
  • Volumes: 2
  • Volume Storage (GiB): 20
  • Object Storage (GiB): 1
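For example, requesting 2 Units corresponds to 2 instances, 2 vCPUs, 0 GPUs, 8192 MiB of RAM, 4 volumes, 40 GiB of volume storage, and 2 GiB of object storage.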

Information

By default, 2 OpenStack Floating IPs, 10 Volume Snapshots and 10 Security Groups are provided to each approved project regardless of the number of requested quota units.

"},{"location":"get-started/allocation/requesting-an-allocation/#request-a-new-openshift-resource-allocation-for-an-openshift-project","title":"Request A New OpenShift Resource Allocation for an OpenShift project","text":"

If users have already been added to the project as described here, the Users selection section will be displayed as shown below:

In this section, the project PI/manager(s) can choose user(s) from the project to be included in this allocation before clicking the \"Submit\" button.

Read the End User License Agreement Before Submission

You should read the shown End User License Agreement (the \"Agreement\"). By clicking the \"Submit\" button, you agree to the Terms and Conditions.

"},{"location":"get-started/allocation/requesting-an-allocation/#resource-allocation-quotas-for-openshift-project","title":"Resource Allocation Quotas for OpenShift Project","text":"

The amount of quota that a resource allocation starts out with after approval can be specified using an integer field in the resource allocation request form, as shown above. The provided unit value is used to compute the individual quotas when PIs or project managers request a resource quota. The basic unit of computational resources is defined as an integer value that corresponds to multiple OpenShift resource quotas. For example, 1 Unit corresponds to:

Quota amount per Unit for each resource:

  • vCPUs: 1
  • GPU: 0
  • RAM (MiB): 4096
  • Persistent Volume Claims (PVC): 2
  • Storage (GiB): 20
  • Ephemeral Storage (GiB): 5

Important: Requested/Approved Allocated OpenShift Storage Quota & Cost

Ensure you choose NERC-OCP (OpenShift) in the Resource option (Always Remember: the first option, i.e. NERC (OpenStack) is selected by default!) and specify your anticipated computing units. Each allocation, whether requested or approved, will be billed based on the pay-as-you-go model. The exception is for Storage quotas, where the cost is determined by your requested and approved allocation values to reserve storage from the total NESE storage pool. For NERC-OCP (OpenShift) Resource Allocations, storage quotas are specified by the \"OpenShift Request on Storage Quota (GiB)\" and \"OpenShift Limit on Ephemeral Storage Quota (GiB)\" allocation attributes. If you have common questions or need more information, refer to our Billing FAQs for comprehensive answers. Keep in mind that you can easily scale and expand your current resource allocations within your project by following this documentation later on.

"},{"location":"get-started/best-practices/best-practices-for-bu/","title":"Best Practices for Boston University","text":""},{"location":"get-started/best-practices/best-practices-for-bu/#further-references","title":"Further References","text":"

https://www.bu.edu/tech/services/security/cyber-security/sensitive-data/

https://www.bu.edu/tech/support/information-security/

https://www.bu.edu/tech/about/security-resources/bestpractice/

"},{"location":"get-started/best-practices/best-practices-for-harvard/","title":"Securing Your Public Facing Server","text":""},{"location":"get-started/best-practices/best-practices-for-harvard/#overview","title":"Overview","text":"

This document aims to provide you with a few concrete actions you can take to significantly enhance the security of your devices. This advice can be applied even if your servers are not public facing. However, we strongly recommend implementing these steps if your servers are intended to be accessible to the internet at large.

All recommendations and guidance follow our policy, which has specific requirements; the current policy/requirements for servers at NERC can be found here.

Harvard University Security Policy Information

Please note that all assets deployed to your NERC project must be compliant with University Security policies. Please familiarize yourself with the Harvard University Information Security Policy and your role in securing data. If you have any questions about how Security should be implemented in the Cloud, please contact your school security officer: \"Harvard Security Officer\".

"},{"location":"get-started/best-practices/best-practices-for-harvard/#know-your-data","title":"Know Your Data","text":"

Depending on the data that exists on your servers, you may have to take added or specific steps to safeguard that data. At Harvard, we developed a scale of data classification ranging from 1 to 5 in order of increasing data sensitivity.

We have prepared added guidance with examples for both Administrative Data and Research Data.

Additionally, if your work involves individuals situated in the European Economic Area, you may be subject to the requirements of the General Data Protection Regulation, and more information about your responsibilities can be found here.

"},{"location":"get-started/best-practices/best-practices-for-harvard/#host-protection","title":"Host Protection","text":"

The primary focus of this guide is to provide you with security essentials that we support and that you can implement with little effort.

"},{"location":"get-started/best-practices/best-practices-for-harvard/#endpoint-protection","title":"Endpoint Protection","text":"

Harvard University uses the endpoint protection service: Crowdstrike, which actively checks a machine for indication of malicious activity and will act to both block the activity and remediate the issue. This service is offered free to our community members and requires the installation of an agent on the server that runs transparently. This software enables the Harvard security team to review security events and act as needed.

Crowdstrike can be downloaded from our repository at agents.itsec.harvard.edu. This software is required for all devices owned by Harvard staff/faculty and is available for all operating systems.

Please note

To access this repository, you need to be on the Harvard Campus Network.

"},{"location":"get-started/best-practices/best-practices-for-harvard/#patchupdate-regularly","title":"Patch/Update Regularly","text":"

It is common that vendors/developers will announce that they have discovered a new vulnerability in the software you may be using. A lot of these vulnerabilities are addressed by new releases that the developer issues. Keeping your software and server operating system up to date with current versions ensures that you are using a version of the software that does not have any known/published vulnerabilities.

"},{"location":"get-started/best-practices/best-practices-for-harvard/#vulnerability-management","title":"Vulnerability Management","text":"

Various software versions have historically been found to be vulnerable to specific attacks and exploits. The risk of running older versions of software is that you may be exposing your machine to a possible known method of attack.

To assess which attacks you might be vulnerable to and be provided with specific remediation guidance, we recommend enrolling your servers with our Tenable service which periodically scans the software on your server and correlates the software information with a database of published vulnerabilities. This service will enable you to prioritize which component you need to upgrade or otherwise define which vulnerabilities you may be exposed to.

The Tenable agent runs transparently and can be enabled to work according to the parameters set for your school; the agent can be downloaded here, and configuration support can be found by filing a support request via the HUIT support ticketing system: ServiceNow.

"},{"location":"get-started/best-practices/best-practices-for-harvard/#safer-applications-development","title":"Safer Applications/ Development","text":"

Every application has its own unique operational constraints/requirements, and the advice below cannot be comprehensive; however, we can offer a few general recommendations.

"},{"location":"get-started/best-practices/best-practices-for-harvard/#secure-credential-management","title":"Secure Credential Management","text":"

Credentials should not be kept on the server, nor should they be included directly in your programming logic.

Attackers often review running code on the server to see if they can obtain any sensitive credentials that may have been included in each script. To better manage your credentials, we recommend either using:

  • 1password Credential Manager

  • AWS Secrets
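For example, with AWS Secrets Manager a script can fetch a credential at run time instead of embedding it in the code. This is only a sketch; the secret name and region below are placeholders:

# Retrieve a secret value at run time rather than hard-coding it in a script.
aws secretsmanager get-secret-value \
  --secret-id my-app/db-password \
  --region us-east-1 \
  --query SecretString \
  --output text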

"},{"location":"get-started/best-practices/best-practices-for-harvard/#not-running-the-application-as-the-rootsuperuser","title":"Not Running the Application as the Root/Superuser","text":"

Frequently an application needs special permissions and access and often it is easiest to run an application in the root/superuser account. This is a dangerous practice since the application, when compromised, gives attackers an account with full administrative privileges. Instead, configuring the application to run with an account with only the permissions it needs to run is a way to minimize the impact of a given compromise.

"},{"location":"get-started/best-practices/best-practices-for-harvard/#safer-networking","title":"Safer Networking","text":"

The goal in safer networking is to minimize the areas that an attacker can target.

"},{"location":"get-started/best-practices/best-practices-for-harvard/#minimize-publicly-exposed-services","title":"Minimize Publicly Exposed Services","text":"

Every port/service open to the internet will be scanned in attempts to access your servers. We recommend that any service/port that does not need to be accessed by the public be placed behind the campus firewall. This will significantly reduce the number of attempts by attackers to compromise your servers.

In practice this usually means that you only expose ports 80/443, which enables you to serve websites, while you keep all other services such as SSH, WordPress logins, etc. behind the campus firewall.

"},{"location":"get-started/best-practices/best-practices-for-harvard/#strengthen-ssh-logins","title":"Strengthen SSH Logins","text":"

Where possible, and if needed, logins to a Harvard service should be placed behind HarvardKey. For researchers, however, the preferred login method is usually SSH, and we recommend the following ways to strengthen your SSH accounts:

  1. Disable password only logins

    • In file /etc/ssh/sshd_config change PasswordAuthentication to no to disable tunneled clear text passwords i.e. PasswordAuthentication no.

    • Uncomment the permit empty passwords option in the second line, and, if needed, change yes to no i.e. PermitEmptyPasswords no.

    • Then run service ssh restart (a scripted version of these edits is sketched after this list).

  2. Use SSH keys with passwords enabled on them

  3. If possible, enroll the SSH service with a Two-factor authentication provider such as DUO or YubiKey.
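The sshd_config edits from step 1 above can be scripted roughly as follows. This is a minimal sketch that assumes a stock Debian/Ubuntu-style /etc/ssh/sshd_config; review the file manually before restarting the service:

# Disable password and empty-password logins, then restart the SSH service.
sudo sed -i 's/^#\?PasswordAuthentication .*/PasswordAuthentication no/' /etc/ssh/sshd_config
sudo sed -i 's/^#\?PermitEmptyPasswords .*/PermitEmptyPasswords no/' /etc/ssh/sshd_config
sudo service ssh restart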

"},{"location":"get-started/best-practices/best-practices-for-harvard/#attack-detection","title":"Attack Detection","text":"

Despite the best protection, a sophisticated attacker may still find a way to compromise your servers, and in those scenarios we want to enhance your ability to detect suspicious activity.

"},{"location":"get-started/best-practices/best-practices-for-harvard/#install-crowdstrike","title":"Install Crowdstrike","text":"

As stated above, CrowdStrike is both an endpoint protection service and an endpoint detection service. This software recognizes activities that might be benign in isolation but, coupled with other actions on the device, may indicate a compromise. It also enables the quickest security response.

CrowdStrike can be downloaded from our repository at agents.itsec.harvard.edu. This software is required for all devices owned by Harvard staff/faculty and is available for all operating systems.

"},{"location":"get-started/best-practices/best-practices-for-harvard/#safeguard-your-system-logs","title":"Safeguard your System Logs","text":"

System logs record and track activity on your servers, including logins, installed applications, errors, and more.

Sophisticated attackers will try to delete these logs to frustrate investigations and prevent discovery of their attacks. To ensure that your logs remain accessible and available for review, we recommend configuring your logs to be sent to a system separate from your servers. This can mean either sending logs to an external file storage repository or configuring a separate logging system using Splunk (a minimal forwarding sketch follows).
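As a minimal sketch (loghost.example.edu is a placeholder for your own log collector; a Splunk forwarder or other collector would be configured along the same lines), rsyslog can forward a copy of all logs to a separate host so that local tampering does not erase them:

    # /etc/rsyslog.d/90-remote.conf -- forward all logs over TCP to a separate log host (placeholder name).
    *.* @@loghost.example.edu:514

Then restart rsyslog to apply the change:

    sudo systemctl restart rsyslog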

For help setting up logging, please file a support request via our support ticketing system: ServiceNow.

"},{"location":"get-started/best-practices/best-practices-for-harvard/#escalating-an-issue","title":"Escalating an Issue","text":"

There are several ways you can report a security issue, and they are all documented on the HUIT Internet Security and Data Privacy group site.

In the event you suspect a security issue has occurred or would like someone to perform a security assessment, please feel free to reach out to the HUIT Internet Security and Data Privacy group, specifically the Operations & Engineering team.

  • Email Harvard ITSEC-OPS

  • Service Queue

  • Harvard HUIT Slack Channel: #isdp-public

"},{"location":"get-started/best-practices/best-practices-for-harvard/#further-references","title":"Further References","text":"

https://policy.security.harvard.edu/all-servers

https://enterprisearchitecture.harvard.edu/security-minimal-viable-product-requirements-huit-hostedmanaged-server-instances

https://policy.security.harvard.edu/security-requirements

"},{"location":"get-started/best-practices/best-practices-for-my-institution/","title":"Best Practices for My Institution","text":""},{"location":"get-started/best-practices/best-practices-for-my-institution/#institutions-with-the-best-practices-outlines","title":"Institutions with the Best Practices outlines","text":"

The following institutions using our services have already provided guidelines for best practices:

  1. Harvard University

  2. Boston University

Upcoming Best Practices for other institutions

We are in the process of obtaining Best Practices for institutions not listed above.

If your institution has already outlined Best Practices guidelines with your internal IT department, please contact us to have them listed here by emailing us at help@nerc.mghpcc.org or by submitting a new ticket at the NERC's Support Ticketing System.

"},{"location":"get-started/best-practices/best-practices/","title":"Best Practices for the NERC Users","text":"

By 2025, according to Gartner's forecast, the responsibility for approximately 99% of cloud security failures will likely lie with customers. These failures can be attributed to the difficulties in gauging and overseeing the risks associated with on-prem cloud security. The MGHPCC will enter into a lightweight Memorandum of Understanding (MOU) with each institutional customer that consumes NERC services; the MOU will also clearly explain the security risks and the shared responsibilities customers take on while using the NERC. This ensures roles and responsibilities are distinctly understood by each party.

NERC Principal Investigators (PIs): PIs are ultimately responsible for their end-users and the security of the systems and applications that are deployed as part of their project(s) on NERC. This includes being responsible for the security of their data hosted on the NERC as well as users, accounts and access management.

Every individual user must comply with their institution's Security and Privacy policies to protect their Data, Endpoints, Accounts and Access management. They must ensure any data created on or uploaded to the NERC is adequately secured. Each customer has complete control over their systems, networks and assets. It is essential to restrict access to the NERC provided user environment only to authorized users by using secure identity and access management. Furthermore, users have authority over various credential-related aspects, including secure login mechanisms, single sign-on (SSO), and multifactor authentication.

Under this model, we are responsible for operating the physical infrastructure, which includes protecting, patching and maintaining the underlying virtualization layer, servers, disks, storage, network gear, and other hardware and software. NERC users, in turn, are responsible for the security of the guest operating system (OS) and software stack, i.e. the databases used to run their applications and data. They are also entrusted with safeguarding middleware, containers, workloads, and any code or data generated by the platform.

All NERC users are responsible for their use of NERC services, which include:

  • Following the best practices for security on NERC services. Please review your institutional guidelines next.

  • Complying with security policies regarding VMs and containers. NERC admins are not responsible for maintaining or deploying VMs or containers created by PIs for their projects. See Harvard University and Boston University policies here. We will be adding more institutions under this page soon. Without prior notice, NERC reserves the right to shut down any VM or container that is causing internal or external problems or violating these policies.

  • Adhering to institutional restrictions and compliance policies around the data they upload and provide access to/from NERC. At NERC, we only support storing internal data, i.e. information chosen to be kept confidential but whose disclosure would not cause material harm to you, your users, or your institution. Your institution may have already classified and categorized data and implemented security policies and guidance for each category. If your project includes sensitive data or information, please contact NERC's admins as soon as possible to discuss other potential options.

  • Backups and/or snapshots of volumes/data, configurations, objects, and their state are the user's responsibility; they are useful in case users accidentally delete or lose their data. NERC admins cannot recover lost data. In addition, while NERC stores data with high redundancy to deal with computer or disk failures, PIs should ensure they have off-site backups for disaster recovery, e.g., to deal with occasional disruptions and outages due to natural disasters that impact the MGHPCC data center.

"},{"location":"get-started/cost-billing/billing-faqs/","title":"Billing Frequently Asked Questions (FAQs)","text":"

Our primary focus is to deliver outstanding on-prem cloud services, prioritizing reliability, security, and cutting-edge solutions to meet your research and teaching requirements. To achieve this, we have implemented a cost-effective pricing model that enables us to maintain, enhance, and sustain the quality of our services. By adopting consistent cost structures across all institutions, we can make strategic investments in infrastructure, expand our service portfolio, and enhance our support capabilities for a seamless user experience.

Most of the institutions using our services have an MOU (Memorandum of Understanding) with us to be better aligned with a number of research regulations, policies and requirements. If your institution does not have an MOU with us, please have someone from your faculty or administration contact us to discuss one by emailing us at help@nerc.mghpcc.org or by submitting a new ticket at the NERC's Support Ticketing System.

"},{"location":"get-started/cost-billing/billing-faqs/#questions-answers","title":"Questions & Answers","text":"1. As a new NERC PI for the first time, am I entitled to any credits?
  • Yes, you will receive up to $1000 of credit for the first month only.

  • This credit is not transferable to subsequent months.

  • This does not apply to the usage of GPU resources.

2. How often will I be billed?

You or your institution will be billed monthly within the first week of each month.

3. If I have an issue with my bill, who do I contact?

Please send your requests by emailing us at help@nerc.mghpcc.org or, by submitting a new ticket at the NERC's Support Ticketing System.

4. How do I control costs?

Upon creating a project, you will set these resource limits (quotas) for OpenStack (VMs), OpenShift (containers), and storage through ColdFront. This is the maximum amount of resources you can consume at one time.

5. Are we invoicing for CPUs/GPUs only when the VM or Pod is active?

Yes. You will only be billed based on your utilization (cores, memory, GPU) when VMs exist (even if they are Stopped!) or when pods are running. Utilization will be translated into billable Service Units (SUs).

Persistent storage related to an OpenStack VM or OpenShift Pod will continue to be billed even when the VM is stopped or the Pod is not running.

6. Am I going to incur costs for allocations after end date?

Currently, a project will continue to be able to utilize allocations even after their \"End Date\", resulting in ongoing costs for you. Such allocations will be marked as \"Active (Needs Renewal)\". In the future, we plan to change this behavior so that allocations past their end date will prevent associated VMs/pods from starting and may cause active VMs/pods to cease running.

7. Are VMs invoiced even when shut down?

Yes, as long as VMs are using resources they are invoiced. In order not to be billed for a VM you must delete the Instance/VM. It is a good idea to create a snapshot of your VM prior to deleting it.

8. Will OpenStack & OpenShift show on a single invoice?

Yes. In the near future customers of NERC will be able to view per project service utilization via the XDMoD tool.

9. What happens when a Flavor is expanded during the month?

a. Flavors cannot be expanded.

b. You can create a snapshot of an existing VM/Instance and, with that snapshot, deploy a new flavor of VM/Instance.

10. Is storage charged separately?

Yes, but on the same invoice. To learn more, see our page on Storage.

11. Will I be charged for storage attached to shut-off instances?

Yes.

12. Are we Invoicing Storage using ColdFront Requests or resource usage?

a. Storage is invoiced based on ColdFront Requests.

b. When you request additional storage through ColdFront, invoicing on that additional storage will occur when your request is fulfilled. When you request a decrease in storage through Request change using ColdFront, your invoicing will adjust accordingly when your request is made. In both cases 'invoicing' means 'accumulate hours for whatever storage quantity was added or removed'.

For example:

  1. I request an increase in storage, the request is approved and processed.

    • At this point we start Invoicing.
  2. I request a decrease in storage.

    • The invoicing for that storage stops immediately.
13. For OpenShift, what values are we using to track CPU & Memory?

a. For invoicing we utilize requests.cpu for tracking CPU utilization & requests.memory for tracking memory utilization.

b. Utilization will be capped based on the limits you set in ColdFront for your resource allocations.

14. If a single Pod exceeds the resources for a GPU SU, how is it invoiced?

It will be invoiced as 2 or more GPU SUs, depending on how many multiples of the resources it exceeds.

15. How often will we change the pricing?

a. Our current plan is no more than once a year for existing offerings.

b. Additional offerings may be added throughout the year (i.e. new types of hardware or storage).

16. Is there any NERC Pricing Calculator?

Yes. Start your estimate with no commitment based on your resource needs by using this online tool. For more information about how to use this tool, see How to use the NERC Pricing Calculator.

"},{"location":"get-started/cost-billing/billing-process-for-bu/","title":"Billing Process for Boston University","text":"

Boston University has elected to receive a centralized invoice for its university investigators and their designated user\u2019s use of NERC services. IS&T will then internally recover the cost from investigators. The process for cost recovery is currently being implemented, and we will reach out to investigators once the process is complete to obtain internal funding information to process your monthly bill.

"},{"location":"get-started/cost-billing/billing-process-for-bu/#subsidization-of-boston-universitys-use-of-nerc","title":"Subsidization of Boston University\u2019s Use of NERC","text":"

Boston University will subsidize a portion of NERC usage by its investigators. The University will subsidize $100 per month of an investigator\u2019s total usage on NERC, regardless of the number of NERC projects an investigator has established. Monthly subsidies cannot be carried over to subsequent months. The subsidized amount and method are subject to change, and any adjustments will be conveyed directly to investigators and updated on this page.

Please direct any questions about BU\u2019s billing process to us by emailing help@nerc.mghpcc.org or submitting a new ticket to the NERC's Support Ticketing System. Questions about a specific invoice that you have received can be sent to IST-ISR-NERC@bu.edu.

"},{"location":"get-started/cost-billing/billing-process-for-harvard/","title":"Billing Process for Harvard University","text":"

Direct Billing for NERC is a convenience service for Harvard Faculty and Departments. HUIT will pay the monthly invoices and then allocate the monthly usage costs on the Harvard University General Ledger. This follows a pattern similar to how other Public Cloud Provider (AWS, Azure, GCP) accounts are billed and leverages the HUIT Central Billing Portal. Your HUIT Customer Code will be matched to your NERC Project Allocation Name as a Billing Asset. In this process you will be asked for your GL billing code, which you can change as needed per project. Please be aware that only a single billing code is allowed per billing asset; therefore, if you have multiple projects with different funds, please create a separate project for each fund if you are able. Otherwise, you will need to take care of this with internal journals inside of your department or lab. During each monthly billing cycle, the NERC team will upload the billing Comma-Separated Values (CSV) files to an AWS Object Storage (S3) bucket accessible to the HUIT Central Billing system. The HUIT Central Billing system ingests the billing data files provided by NERC, maps the usage costs to HUIT Billing customers (and GL Codes) and then includes those amounts in HUIT Monthly Billing of all customers. This is an automated process.

Please follow these two steps to ensure proper billing setup:

  1. Each Harvard PI must have a HUIT billing account linked to their NetID (abc123), and NERC requires a HUIT \"Customer Code\" for billing purposes. To create a HUIT billing account, sign up here with your HarvardKey. The PI's submission of the corresponding HUIT \"Customer Code\" is now seamlessly integrated into the PI user account role submission process. This means that PIs can provide the corresponding HUIT \"Customer Code\" either while submitting NERC's PI Request Form or by submitting a new ticket at NERC's Support Ticketing System under the \"NERC PI Account Request\" option in the Help Topic dropdown menu.

    What if you already have an existing Customer Code?

    Please note that if you already have an existing active NERC account, you need to provide your HUIT Customer Code to NERC. If you think your department may already have a HUIT account but you don\u2019t know the corresponding Customer Code then you can contact HUIT Billing to get the required Customer Code.

  2. During the Resource Allocation review and approval process, we will utilize the HUIT \"Customer Code\" provided by the PI in step #1 to align it with the approved allocation. Before confirming the mapping of the Customer Code to the Resource Allocation, we will send an email to the PI to confirm its accuracy and then approve the requested allocation. Subsequently, after the allocation is approved, we will request the PI to initiate a change request to input the correct \"Customer Code\" into the allocation's \"Institution-Specific Code\" attribute's value.

    Very Important Note

    We recommend keeping your \"Institution-Specific Code\" updated at all times, ensuring it accurately reflects your current and valid Customer Code. The PI or project manager(s) have the authority to request changes for updating the \"Institution-Specific Code\" attribute for each resource allocation. They can do so by submitting a Change Request as outlined here.

    How to view Project Name, Project ID & Institution-Specific Code?

    By clicking on the Allocation detail page through ColdFront, you can access information about the allocation of each resource, including OpenStack and OpenShift as described here. You can review and verify Allocated Project Name, Allocated Project ID and Institution-Specific Code attributes, which are located under the \"Allocation Attributes\" section on the detail page as described here.

    Once we confirm the six-digit HUIT Customer Code for the PI and the correct resource allocation, the NERC admin team will initiate the creation of a new ServiceNow ticket. This will be done by reaching out to HUIT Billing or directly emailing HUIT Billing at huit-billing@harvard.edu for the approved and active allocation request.

    In this email, the NERC admin needs to specify the Allocated Project ID, Allocated Project Name, Customer Code, and PI's Email address. Then, the HUIT billing team will generate a unique Asset ID to be utilized by the Customer's HUIT billing portal.

    Important Information regarding HUIT Billing SLA

    Please note that we will require the PI or Manager(s) to repeat step #2 for any new resource allocation(s) as well as renewed allocation(s). Additionally, the HUIT Billing SLA for new Cloud Billing assets is 2 business days, although most requests are typically completed within 8 hours.

    Harvard University Security Policy Information

    Please note that all assets deployed to your NERC project must be compliant with University Security policies as described here. Please familiarize yourself with the Harvard University Information Security Policy and your role in securing data. If you have any questions about how Security should be implemented in the Cloud, please contact your school security officer: \"Harvard Security Officer\".

"},{"location":"get-started/cost-billing/billing-process-for-my-institution/","title":"Billing Process for My Institution","text":""},{"location":"get-started/cost-billing/billing-process-for-my-institution/#memorandum-of-understanding-mou","title":"Memorandum of Understanding (MOU)","text":"

The New England Research Cloud (NERC) is a shared service offered through the Massachusetts Green High Performance Computing Center (MGHPCC). The MGHPCC will enter into a lightweight Memorandum of Understanding (MOU) with each institutional customer that consumes NERC services. The MOU is intended to ensure the institution maintains access to valuable and relevant cloud services provided by the MGHPCC via the NERC to be better aligned to a number of research regulations, policies, and requirements and also ensure NERC remains sustainable over time.

"},{"location":"get-started/cost-billing/billing-process-for-my-institution/#institutions-with-established-mous-and-billing-processes","title":"Institutions with established MOUs and Billing Processes","text":"

For cost recovery purposes, institutional customers may elect to receive one invoice for the usage of NERC services by their PIs and recover the costs internally. Every month, the NERC team will export, back up, and securely store the billing data for all PIs in the form of comma-separated values (CSV) files and provide it to the MGHPCC for billing purposes.

The following institutions using our services have established MOU as well as billing processes with us:

  1. Harvard University

  2. Boston University

Upcoming MOU with other institutions

We are in the process of establishing MOUs for institutions not listed above.

PIs from other institutions not listed above can still utilize NERC services with the understanding that they are directly accountable for managing their usage and ensuring all service charges are paid promptly. If you have common questions or need further information, see our Billing FAQs for comprehensive answers.

If your institution does not have an MOU with us, please have someone from your faculty or administration contact us to discuss one by emailing us at help@nerc.mghpcc.org or by submitting a new ticket at the NERC's Support Ticketing System.

"},{"location":"get-started/cost-billing/how-pricing-works/","title":"How does NERC pricing work?","text":"

As a new PI using NERC for the first time, am I entitled to any credits?

As a new PI using NERC for the first time, you might wonder if you get any credits. Yes, you'll receive up to $1000 for the first month only. But remember, this credit cannot be used in the following months. Also, it does not apply to GPU resource usage.

NERC offers you a pay-as-you-go approach for pricing for our cloud infrastructure offerings (Tiers of Service), including Infrastructure-as-a-Service (IaaS) \u2013 Red Hat OpenStack and Platform-as-a-Service (PaaS) \u2013 Red Hat OpenShift. The exception is the Storage quotas in NERC Storage Tiers, where the cost is determined by your requested and approved allocation values to reserve storage from the total NESE storage pool. For NERC (OpenStack) Resource Allocations, storage quotas are specified by the \"OpenStack Volume Quota (GiB)\" and \"OpenStack Swift Quota (GiB)\" allocation attributes. Whereas for NERC-OCP (OpenShift) Resource Allocations, storage quotas are specified by the \"OpenShift Request on Storage Quota (GiB)\" and \"OpenShift Limit on Ephemeral Storage Quota (GiB)\" allocation attributes. If you have common questions or need more information, refer to our Billing FAQs for comprehensive answers. NERC offers a flexible cost model where an institution (with a per-project breakdown) is billed solely for the duration of the specific services required. Access is based on project-approved resource quotas, eliminating runaway usage and charges. There are no obligations of long-term contracts or complicated licensing agreements. Each institution will enter a lightweight MOU with MGHPCC that defines the services and billing model.

"},{"location":"get-started/cost-billing/how-pricing-works/#calculations","title":"Calculations","text":""},{"location":"get-started/cost-billing/how-pricing-works/#service-units-sus","title":"Service Units (SUs)","text":"Name vGPU vCPU RAM (GiB) Current Price H100 GPU 1 64 384 $6.04 A100sxm4 GPU 1 32 240 $2.078 A100 GPU 1 24 74 $1.803 V100 GPU 1 48 192 $1.214 K80 GPU 1 6 28.5 $0.463 CPU 0 1 4 $0.013

Expected Availability of H100 GPUs

H100 GPUs will be available in early 2025.

"},{"location":"get-started/cost-billing/how-pricing-works/#breakdown","title":"Breakdown","text":""},{"location":"get-started/cost-billing/how-pricing-works/#cpugpu-sus","title":"CPU/GPU SUs","text":"

Service Units (SUs) can only be purchased as a whole unit. We will charge for Pods (summed up by Project) and VMs on a per-hour basis for any portion of an hour they are used, and any VM \"flavor\"/Pod reservation is charged as a multiplier of the base SU for the maximum resource they reserve.

GPU SU Example:

  • A Project or VM with:

    1 A100 GPU, 24 vCPUs, 95MiB RAM, 199.2hrs

  • Will be charged:

    1 A100 GPU SUs x 200hrs (199.2 rounded up) x $1.803

    $360.60

OpenStack CPU SU Example:

  • A Project or VM with:

    3 vCPU, 20 GiB RAM, 720hrs (24hr x 30days)

  • Will be charged:

    5 CPU SUs due to the extra RAM (20GiB vs. 12GiB(3 x 4GiB)) x 720hrs x $0.013

    $46.80

Are VMs invoiced even when shut down?

Yes, VMs are invoiced as long as they are utilizing resources. In order not to be billed for a VM, you must delete your Instance/VM. It is advisable to create a snapshot of your VM prior to deleting it, ensuring you have a backup of your data and configurations. By proactively managing your VMs and resources, you can optimize your usage and minimize unnecessary costs.

If you have common questions or need more information, refer to our Billing FAQs for comprehensive answers.

OpenShift CPU SU Example:

  • Project with 3 Pods with:

    i. 1 vCPU, 3 GiB RAM, 720hrs (24hr*30days)

    ii. 0.1 vCPU, 8 GiB RAM, 720hrs (24hr*30days)

    iii. 2 vCPU, 4 GiB RAM, 720hrs (24hr*30days)

  • Project Will be charged:

    RoundUP(Sum(

    1 CPU SUs due to first pod * 720hrs * $0.013

    2 CPU SUs due to extra RAM (8GiB vs 0.4GiB(0.1*4GiB)) * 720hrs * $0.013

    2 CPU SUs due to more CPU (2vCPU vs 1vCPU(4GiB/4)) * 720hrs * $0.013

    ))

    =RoundUP(Sum(720(1+2+2)))*0.013

    $46.80

How to calculate cost for all running OpenShift pods?

If you prefer a function for the OpenShift pods here it is:

Project SU HR count = RoundUP(SUM(Pod1 SU hour count + Pod2 SU hr count + ...))

OpenShift Pods are summed up to the project level so that fractions of CPU/RAM that some pods use will not get overcharged. There will be a split between CPU and GPU pods, as GPU pods cannot currently share resources with CPU pods.
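As an illustrative sketch of this project-level roll-up (reproducing the three pods from the worked example above, at $0.013 per CPU SU hour and 4 GiB RAM per CPU SU; actual invoicing is done by NERC's billing pipeline, not by this snippet):

    # Per-pod SUs = max(requested vCPU, requested RAM GiB / 4); project SU-hours are rounded up, then priced.
    awk 'function max(a, b) { return a > b ? a : b }
         BEGIN {
             hours = 720; rate = 0.013
             su = max(1, 3/4) + max(0.1, 8/4) + max(2, 4/4)               # pods i, ii and iii from the example
             su_hours = su * hours
             if (su_hours > int(su_hours)) su_hours = int(su_hours) + 1   # RoundUP at the project level
             print su_hours * rate                                        # prints 46.8
         }'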

"},{"location":"get-started/cost-billing/how-pricing-works/#storage","title":"Storage","text":"

Storage is charged separately at a rate of $0.009 TiB/hr or $9.00E-6 GiB/hr. OpenStack volumes remain provisioned until they are deleted. VMs reserve volumes, and you can also create extra volumes yourself. In OpenShift, pod (ephemeral) storage is only provisioned while the pod is active, while persistent volumes remain provisioned until they are deleted.

Very Important: Requested/Approved Allocated Storage Quota and Cost

The Storage cost is determined by your requested and approved allocation values. Once approved, these Storage quotas will need to be reserved from the total NESE storage pool for both NERC (OpenStack) and NERC-OCP (OpenShift) resources. For NERC (OpenStack) Resource Allocations, storage quotas are specified by the \"OpenStack Volume Quota (GiB)\" and \"OpenStack Swift Quota (GiB)\" allocation attributes. Whereas for NERC-OCP (OpenShift) Resource Allocations, storage quotas are specified by the \"OpenShift Request on Storage Quota (GiB)\" and \"OpenShift Limit on Ephemeral Storage Quota (GiB)\" allocation attributes.

Even if you have deleted all volumes, snapshots, and object storage buckets and objects in your OpenStack and OpenShift projects, it is essential to adjust the approved values for your NERC (OpenStack) and NERC-OCP (OpenShift) resource allocations to zero (0); otherwise you will still incur a charge for the approved storage, as explained in Billing FAQs.

Keep in mind that you can easily scale and expand your current resource allocations within your project. Follow this guide on how to use NERC's ColdFront to reduce your Storage quotas for NERC (OpenStack) allocations and this guide for NERC-OCP (OpenShift) allocations.

Storage Example 1:

  • Volume or VM with:

    500GiB for 699.2hrs

  • Will be charged:

    .5 Storage TiB SU (.5 TiB x 700hrs) x $0.009 TiB/hr

    $3.15

Storage Example 2:

  • Volume or VM with:

    10TiB for 720hrs (24hr x 30days)

  • Will be charged:

    10 Storage TiB SU (10TiB x 720 hrs) x $0.009 TiB/hr

    $64.80

Storage includes all types of storage: Object, Block, Ephemeral & Image.

"},{"location":"get-started/cost-billing/how-pricing-works/#high-level-function","title":"High-Level Function","text":"

To provide a more practical way to calculate your usage, here is a function of how the calculation works for OpenShift and OpenStack.

  1. OpenStack = (Resource (vCPU/RAM/vGPU) assigned to VM flavor converted to number of equivalent SUs) * (time VM has been running), rounded up to a whole hour + Extra storage.

    NERC's OpenStack Flavor List

    You can find the most up-to-date information on the current NERC's OpenStack flavors with corresponding SUs by referring to this page.

  2. OpenShift = (Resource (vCPU/RAM) requested by Pod converted to the number of SU) * (time Pod was running), summed up to project level rounded up to the whole hour.
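A rough shell sketch of the OpenStack formula in item 1 above (reproducing the earlier OpenStack CPU SU example of a 3 vCPU / 20 GiB VM running for 720 hours, plus an illustrative 0.5 TiB of extra volume storage; rates are the published $0.013 per CPU SU hour and $0.009 per TiB hour):

    awk 'function max(a, b) { return a > b ? a : b }
         function ceil(x)   { return x == int(x) ? x : int(x) + 1 }
         BEGIN {
             vcpu = 3; ram_gib = 20; hours = 720            # flavor and runtime from the example
             extra_storage_tib = 0.5                        # illustrative extra volume storage
             su = max(vcpu, ceil(ram_gib / 4))              # CPU SUs implied by the flavor
             compute = su * ceil(hours) * 0.013             # runtime rounded up to a whole hour
             storage = extra_storage_tib * hours * 0.009
             print compute, storage                         # prints 46.8 and 3.24
         }'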

"},{"location":"get-started/cost-billing/how-pricing-works/#how-to-pay","title":"How to Pay?","text":"

To ensure a comprehensive understanding of the billing process and payment options for NERC offerings, we advise PIs/Managers to visit individual pages designated for each institution. These pages provide detailed information specific to each organization's policies and procedures regarding their billing. By exploring these dedicated pages, you can gain insights into the preferred payment methods, invoicing cycles, breakdowns of cost components, and any available discounts or offers. Understanding the institution's unique approach to billing ensures accurate planning, effective financial management, and a transparent relationship with us.

If you have common questions or need further information, see our Billing FAQs for comprehensive answers.

"},{"location":"get-started/cost-billing/nerc-pricing-calculator/","title":"NERC Pricing Calculator","text":"

The NERC Pricing Calculator is a Google Sheets-based tool for estimating the cost of utilizing various NERC resources in different NERC service offerings. It offers a user-friendly interface, allowing users to input their requirements and customize configurations to generate accurate and tailored cost estimates for optimal budgeting and resource allocation.

Start your estimate with no commitment, and explore NERC services and pricing for your research needs by using this online tool.

How to use the NERC Pricing Calculator?

Please note: you need to make a copy of this tool before estimating the cost. Once copied, you can easily update the corresponding resource type columns' values on your own working sheet, which will reflect your potential Service Units (SU), Rate, and cost per Hour, Month and Year. This tool has 4 sheets at the bottom as shown here. If you want to calculate your cost estimates based on the available NERC OpenStack flavors (which define the compute, memory, and storage capacity for your dedicated instances), select and use the second sheet, titled \"OpenStack Flavor\". For cost estimating the NERC OpenShift resources, use the first sheet, titled \"OpenShift SU\", and input pod-specific resource requests in each row. If you are scaling a pod to more than one replica, enter a new row or entry for each scaled pod. For Storage cost, use the third sheet, titled \"Calculate Storage\". The total cost will then be reflected on the last sheet, titled \"Total Cost\".

For more information about how NERC pricing works, see How does NERC pricing work and to know more about billing process for your own institution, see Billing Process for My Institution.

"},{"location":"migration-moc-to-nerc/Step1/","title":"Creating NERC Project and Networks","text":"

This process includes some waiting for emails and approvals. It is advised to start this process, then move to Step 2, and continue with these steps once you receive approval.

"},{"location":"migration-moc-to-nerc/Step1/#account-creation-quota-request","title":"Account Creation & Quota Request","text":"
  1. Register for your new NERC account here.

    1. Wait for an approval email.
  2. Register to be a PI for a NERC account here.

    1. Wait for an approval email.
  3. Request the quota necessary for all of your MOC Projects to be added to NERC here (link also in PI approval email).

    1. Log in with your institution login by clicking on Log in via OpenID Connect (highlighted in yellow above).

    2. Under Projects>> Click on the name of your project (highlighted in yellow above).

    3. Scroll down until you see Request Resource Allocation (highlighted in yellow above) and click on it.

    4. Fill out the Justification (highlighted in purple above) for the quota allocation.

    5. Using your \u201cMOC Instance information\u201d table you gathered from your MOC project calculate the total number of Instances, VCPUs, RAM and use your \u201cMOC Volume Information\u201d table to calculate Disk space you will need.

    6. Using the up and down arrows (highlighted in yellow above), or by entering the number manually, select how many multiples of 1 Instance, 2 vCPUs, 0 GPUs, 4GB RAM, 2 Volumes, 100GB Disk and 1GB Object Storage you will need.

      1. For example, if I need 2 instances, 2 vCPUs, 3GB RAM, 3 Volumes and 30GB of storage, I would type in 2 or click the up arrow once to select 2 units.
    7. Click Submit (highlighted in green above).

  4. Wait for your allocation approval email.

"},{"location":"migration-moc-to-nerc/Step1/#setup","title":"Setup","text":""},{"location":"migration-moc-to-nerc/Step1/#login-to-the-dashboard","title":"Login to the Dashboard","text":"
  1. Log into the NERC OpenStack Dashboard using your OpenID Connect password.

    1. Click Connect.

    2. Select your institution from the drop down (highlighted in yellow above).

    3. Click Log On (highlighted in purple).

    4. Follow your institution's log on instructions.

"},{"location":"migration-moc-to-nerc/Step1/#setup-nerc-network","title":"Setup NERC Network","text":"
  1. You are then brought to the Project>Compute>Overview location of the Dashboard.

    1. This will look very familiar as the MOC and NERC Dashboard are quite similar.

    2. Follow the instructions here to set up your network/s (you may also use the default_network if you wish).

      1. The networks don't have to exactly match the MOC. You only need the networks for creating your new instances (and accessing them once we complete the migration).
    3. Follow the instructions here to set up your router/s (you may also use the default_router if you wish).

    4. Follow the instructions here to set up your Security Group/s.

      1. This is where you can use your \u201cMOC Security Group Information\u201d table to create similar Security Groups to the ones you had in the MOC.
    5. Follow the instructions here to set up your SSH Key-pair/s.

"},{"location":"migration-moc-to-nerc/Step2/","title":"Identify Volumes, Instances & Security Groups on the MOC that need to be Migrated to the NERC","text":"

Please read the instructions in their entirety before proceeding. Allow yourself enough time to complete them.

Volume Snapshots will not be migrated. If you have a Snapshot you wish to back up, please \u201cCreate Volume\u201d from it first.

"},{"location":"migration-moc-to-nerc/Step2/#confirm-access-and-login-to-moc-dashboard","title":"Confirm Access and Login to MOC Dashboard","text":"
  1. Go to the MOC Dashboard.
"},{"location":"migration-moc-to-nerc/Step2/#sso-google-login","title":"SSO / Google Login","text":"
  1. If you have SSO through your Institution or google select Institution Account from the dropdown.

  2. Click Connect.

  3. Click on University Logins (highlighted in yellow below) if you are using SSO with your Institution.

    1. Follow your Institution's login steps after that, and skip to Gathering MOC information for the Migration.
  4. Click Google (highlighted in purple above) if your SSO is through Google.

    1. Follow standard Google login steps to get in this way, and skip to Gathering MOC information for the Migration.
"},{"location":"migration-moc-to-nerc/Step2/#keystone-credentials","title":"Keystone Credentials","text":"
  1. If you have a standard login and password leave the dropdown as Keystone Credentials.

  2. Enter your User Name.

  3. Enter your Password.

  4. Click Connect.

"},{"location":"migration-moc-to-nerc/Step2/#dont-know-your-login","title":"Don't know your login?","text":"
  1. If you do not know your login information please create a Password Reset ticket.

  2. Click Open a New Ticket (highlighted in yellow above).

  3. Click the dropdown and select Forgot Pass & SSO Account Link (highlighted in blue above).

  4. In the text field (highlighted in purple above) provide the Institution email, project you are working on and the email address you used to create the account.

  5. Click Create Ticket (highlighted in yellow above) and wait for the pinwheel.

  6. You will receive an email to let you know that the MOC support staff will get back to you.

"},{"location":"migration-moc-to-nerc/Step2/#gathering-moc-information-for-the-migration","title":"Gathering MOC information for the Migration","text":"
  1. You are then brought to the Project>Compute>Overview location of the Dashboard.

"},{"location":"migration-moc-to-nerc/Step2/#create-tables-to-hold-your-information","title":"Create Tables to hold your information","text":"

Create 3 tables listing all of your Instances, Volumes and Security Groups. For example, if you have 2 instances, 3 volumes and 2 Security Groups like the samples below, your lists might look like this:

"},{"location":"migration-moc-to-nerc/Step2/#moc-instance-information-table","title":"MOC Instance Information Table","text":"Instance Name MOC VCPUs MOC Disk MOC RAM MOC UUID Fedora_test 1 10GB 1GB 16a1bfc2-8c90-4361-8c13-64ab40bb6207 Ubuntu_Test 1 10GB 2GB 6a40079a-59f7-407c-9e66-23bc5b749a95 total 2 20GB 3GB"},{"location":"migration-moc-to-nerc/Step2/#moc-volume-information-table","title":"MOC Volume Information Table","text":"MOC Volume Name MOC Disk MOC Attached To Bootable MOC UUID NERC Volume Name Fedora 10GiB Fedora_test Yes ea45c20b-434a-4c41-8bc6-f48256fc76a8 9c73295d-fdfa-4544-b8b8-a876cc0a1e86 10GiB Ubuntu_Test Yes 9c73295d-fdfa-4544-b8b8-a876cc0a1e86 Snapshot of Fed_Test 10GiB Fedora_test No ea45c20b-434a-4c41-8bc6-f48256fc76a8 total 30GiB"},{"location":"migration-moc-to-nerc/Step2/#moc-security-group-information-table","title":"MOC Security Group Information Table","text":"Security Group Name Direction Ether Type IP Protocol Port Range Remote IP Prefix ssh_only_test Ingress IPv4 TCP 22 0.0.0.0/0 ping_only_test Ingress IPv4 ICMP Any 0.0.0.0/0"},{"location":"migration-moc-to-nerc/Step2/#gather-the-instance-information","title":"Gather the Instance Information","text":"

Gather the Instance UUIDs (of only the instances that you need to migrate to the NERC).

  1. Click Instances (highlighted in pink in image above)

  2. Click the Instance Name (highlighted in Yellow above) of the first instance you would like to gather data on.

  3. Locate the ID row (highlighted in green above) and copy and save the ID (highlighted in purple above).

    1. This is the UUID of your first Instance.
  4. Locate the RAM, VCPUs & Disk rows (highlighted in yellow) and copy and save the associated values (highlighted in pink).

  5. Repeat this section for each Instance you have.

"},{"location":"migration-moc-to-nerc/Step2/#gather-the-volume-information","title":"Gather the Volume Information","text":"

Gather the Volume UUIDs (of only the volumes that you need to migrate to the NERC).

  1. Click Volumes dropdown.

  2. Select Volumes (highlighted in purple above).

  3. Click the Volume Name (highlighted in yellow above) of the first volume you would like to gather data on.

    1. The name might be the same as the ID (highlighted in blue above).

  4. Locate the ID row (highlighted in green above) and copy and save the ID (highlighted in purple above).

    1. This is the UUID of your first Volume.
  5. Locate the Size row (highlighted in yellow above) and copy and save the Volume size (highlighted in pink above).

  6. Locate the Bootable row (highlighted in gray above) and copy and save the Bootable value (highlighted in red above).

  7. Locate the Attached To row (highlighted in blue above) and copy and save the Instance this Volume is attached to (highlighted in orange above).

    1. If the volume is not attached to an instance it will state \u201cNot attached\u201d.
  8. Repeat this section for each Volume you have.

"},{"location":"migration-moc-to-nerc/Step2/#gather-your-security-group-information","title":"Gather your Security Group Information","text":"

If you already have all of your Security Group information outside of the OpenStack Dashboard, you can skip the rest of this section.

Gather the Security Group information (of only the security groups that you need to migrate to the NERC).

  1. Click Network dropdown

  2. Click Security Groups (highlighted in yellow above).

  3. Click Manage Rules (highlighted in yellow above) of the first Security Group you would like to gather data on.

  4. Ignore the first 2 lines (highlighted in yellow above).

  5. Write down the important information for all lines after (highlighted in blue above).

    1. Direction, Ether Type, IP Protocol, Port Range, Remote IP Prefix, Remote Security Group.
  6. Repeat this section for each security group you have.

"},{"location":"migration-moc-to-nerc/Step3/","title":"Steps to Migrate Volumes from MOC to NERC","text":""},{"location":"migration-moc-to-nerc/Step3/#create-a-spreadsheet-to-track-the-values-you-will-need","title":"Create a spreadsheet to track the values you will need","text":"
  1. The values you will want to keep track of are.

    Label              | Value
    MOCAccess          |
    MOCSecret          |
    NERCAccess         |
    NERCSecret         |
    MOCEndPoint        | https://kzn-swift.massopen.cloud
    NERCEndPoint       | https://stack.nerc.mghpcc.org:13808
    MinIOVolume        |
    MOCVolumeBackupID  |
    ContainerName      |
    NERCVolumeBackupID |
    NERCVolumeName     |
  2. It is also helpful to have a text editor open so that you can insert the values from the spreadsheet into the commands that need to be run.

"},{"location":"migration-moc-to-nerc/Step3/#create-a-new-moc-mirror-to-nerc-instance","title":"Create a New MOC Mirror to NERC Instance","text":"
  1. Follow the instructions here to set up your instance.

    1. When selecting the Image please select moc-nerc-migration (highlighted in yellow above).

    2. Once the Instance is Running, move on to the next step.

  2. Name your new instance something you will remember, MirrorMOC2NERC for example.

  3. Assign a Floating IP to your new instance. If you need assistance please review the Floating IP steps here.

    1. Your floating IPs will not be the same as the ones you had in the MOC. Please claim new floating IPs to use.
  4. SSH into the MirrorMOC2NERC Instance. The user to use for login is centos. If you have any trouble please review the SSH steps here.

"},{"location":"migration-moc-to-nerc/Step3/#setup-application-credentials","title":"Setup Application Credentials","text":""},{"location":"migration-moc-to-nerc/Step3/#gather-moc-application-credentials","title":"Gather MOC Application Credentials","text":"
  1. Follow the instructions here to create your Application Credentials.

    1. Make sure to save the clouds.yaml as clouds_MOC.yaml.
"},{"location":"migration-moc-to-nerc/Step3/#gathering-nerc-application-credentials","title":"Gathering NERC Application Credentials","text":"
  1. Follow the instructions under the header Command Line setup here to create your Application Credentials.

    1. Make sure to save the clouds.yaml as clouds_NERC.yaml.
"},{"location":"migration-moc-to-nerc/Step3/#combine-the-two-cloudsyaml-files","title":"Combine the two clouds.yaml files","text":"
  1. Make a copy of clouds_MOC.yaml and save as clouds.yaml

  2. Open clouds.yaml in a text editor of your choice.

    1. Change the openstack (highlighted in yellow above) value to moc (highlighted in yellow two images below).
  3. Open clouds_NERC.yaml in a text editor of your choice.

    1. Change the openstack (highlighted in yellow above) value to nerc (highlighted in green below).

    2. Highlight and copy everything from nerc to the end of the line that starts with auth_type

    3. Paste the copied text into clouds.yaml below the line that starts with auth_type. Your new clouds.yaml will look similar to the image above.

  4. For further instructions on clouds.yaml files go Here. A sketch of the combined file is shown below.
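A sketch of what the combined clouds.yaml might look like (all values are placeholders; copy the actual auth_url, region, and application credential values from your downloaded clouds_MOC.yaml and clouds_NERC.yaml files):

    clouds:
      moc:
        auth:
          auth_url: <auth_url from clouds_MOC.yaml>
          application_credential_id: <id from clouds_MOC.yaml>
          application_credential_secret: <secret from clouds_MOC.yaml>
        region_name: <region from clouds_MOC.yaml>
        interface: public
        identity_api_version: 3
        auth_type: v3applicationcredential
      nerc:
        auth:
          auth_url: <auth_url from clouds_NERC.yaml>
          application_credential_id: <id from clouds_NERC.yaml>
          application_credential_secret: <secret from clouds_NERC.yaml>
        region_name: <region from clouds_NERC.yaml>
        interface: public
        identity_api_version: 3
        auth_type: v3applicationcredential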

"},{"location":"migration-moc-to-nerc/Step3/#moving-application-credentials-to-vm","title":"Moving Application Credentials to VM","text":"
  1. SSH into the VM created at the top of this page for example MirrorMOC2NERC.

  2. Create the openstack config folder and empty clouds.yaml file.

    mkdir -p ~/.config/openstack\ncd ~/.config/openstack\ntouch clouds.yaml\n
  3. Open the clouds.yaml file in your favorite text editor. (vi is preinstalled).

  4. Copy the entire text inside the clouds.yaml file on your local computer.

  5. Paste the contents of the local clouds.yaml file into the clouds.yaml on the VM.

  6. Save and exit your VM text editor.

"},{"location":"migration-moc-to-nerc/Step3/#confirm-the-instances-are-shut-down","title":"Confirm the Instances are Shut Down","text":"
  1. Confirm the instances are Shut Down. This is a very important step because we will be using the force modifier when we make our backup. The volume can become corrupted if the Instance is not in a Shut Down state.

  2. Log into the Instance page of the MOC Dashboard

  3. Check that the Power State of every instance you plan to migrate volumes from is set to Shut Down (highlighted in yellow in image above); a CLI alternative is sketched after these steps.

    1. If they are not please do so from the Actions Column.

      1. Click the drop down arrow under actions.

      2. Select Shut Off Instance (blue arrow pointing to it in image above).
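If you prefer the command line (assuming the clouds.yaml from the previous section is already in place on the migration VM), you can also confirm the power state of your MOC instances with the openstack client; every instance you plan to migrate should show a SHUTOFF status:

    openstack --os-cloud moc server list -c Name -c Status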

"},{"location":"migration-moc-to-nerc/Step3/#backup-and-move-volume-data-from-moc-to-nerc","title":"Backup and Move Volume Data from MOC to NERC","text":"
  1. SSH into the VM created at the top of this page. For steps on how to do this please see instructions here.
"},{"location":"migration-moc-to-nerc/Step3/#create-ec2-credentials-in-moc-nerc","title":"Create EC2 credentials in MOC & NERC","text":"
  1. Generate credentials for Kaizen with the command below.

    openstack --os-cloud moc ec2 credentials create\n

    1. Copy the access (circled in red above) and secret (circled in blue above) values into your table as <MOCAccess> and <MOCSecret>.
  2. Generate credentials for the NERC with the command below.

    openstack --os-cloud nerc ec2 credentials create\n

    1. Copy the access (circled in red above) and secret (circled in blue above) values into your table as <NERCAccess> and <NERCSecret>.
"},{"location":"migration-moc-to-nerc/Step3/#find-object-store-endpoints","title":"Find Object Store Endpoints","text":"
  1. Look up information on the object-store service in MOC with the command below.

    openstack --os-cloud moc catalog show object-store -c endpoints\n

    1. If the value is different than https://kzn-swift.massopen.cloud copy the base URL for this service (circled in red above).
  2. Look up information on the object-store service in NERC with the command below.

    openstack --os-cloud nerc catalog show object-store -c endpoints\n

    1. If the value is different than https://stack.nerc.mghpcc.org:13808 copy the base URL for this service (circled in red above).
"},{"location":"migration-moc-to-nerc/Step3/#configure-minio-client-aliases","title":"Configure minio client aliases","text":"
  1. Create a MinIO alias for MOC using the base URL of the \"public\" interface of the object-store service <MOCEndPoint> and the EC2 access key (ex. <MOCAccess>) & secret key (ex. <MOCSecret>) from your table.

    $ mc alias set moc https://kzn-swift.massopen.cloud <MOCAccess> <MOCSecret>\nmc: Configuration written to `/home/centos/.mc/config.json`. Please update your access credentials.\n mc: Successfully created `/home/centos/.mc/share`.\nmc: Initialized share uploads `/home/centos/.mc/share/uploads.json` file.\nmc: Initialized share downloads `/home/centos/.mc/share/downloads.json` file.\nAdded `moc` successfully.\n
  2. Create a MinIO alias for NERC using the base URL of the \"public\" interface of the object-store service <NERCEndPoint> and the EC2 access key (ex. <NERCAccess>) & secret key (ex. <NERCSecret>) from your table.

    $ mc alias set nerc https://stack.nerc.mghpcc.org:13808 <NERCAccess> <NERCSecret>\nAdded `nerc` successfully.\n
"},{"location":"migration-moc-to-nerc/Step3/#backup-moc-volumes","title":"Backup MOC Volumes","text":"
  1. Locate the desired Volume UUID from the table you created in Step 2 Gathering MOC Information.

  2. Add the first Volume ID from your table to the code below in the <MOCVolumeID> field and create a Container Name to replace the <ContainerName> field. Container Name should be easy to remember as well as unique so include your name. Maybe something like thomasa-backups.

    openstack --os-cloud moc volume backup create --force --container <ContainerName> <MOCVolumeID>\n+-------+---------------------+\n| Field | Value               |\n+-------+---------------------+\n| id    | <MOCVolumeBackupID> |\n| name  | None                |\n
    1. Copy down your <MOCVolumeBackupID> to your table.
  3. Wait for the backup to become available. You can run the command below to check on the status. If your volume is 25 GiB or larger, this might be a good time to go get a warm beverage or lunch.

    openstack --os-cloud moc volume backup list\n+---------------------+------+-------------+-----------+------+\n| ID                  | Name | Description | Status    | Size |\n+---------------------+------+-------------+-----------+------+\n| <MOCVolumeBackupID> | None | None        | creating  |   10 |\n...\nopenstack --os-cloud moc volume backup list\n+---------------------+------+-------------+-----------+------+\n| ID                  | Name | Description | Status    | Size |\n+---------------------+------+-------------+-----------+------+\n| <MOCVolumeBackupID> | None | None        | available |   10 |\n
"},{"location":"migration-moc-to-nerc/Step3/#gather-minio-volume-data","title":"Gather MinIO Volume data","text":"
  1. Get the volume information for future commands. Use the same <ContainerName> from when you created the volume backup. It is worth noting that this value shares the ID number with the VolumeID.
    $ mc ls moc/<ContainerName>\n[2022-04-29 09:35:16 EDT]     0B <MinIOVolume>/\n
"},{"location":"migration-moc-to-nerc/Step3/#create-a-container-on-nerc","title":"Create a Container on NERC","text":"
  1. Create the NERC container that we will send the volume to. Use the same <ContainerName> from when you created the volume backup.
    $ mc mb nerc/<ContainerName>\nBucket created successfully `nerc/<ContainerName>`.\n
"},{"location":"migration-moc-to-nerc/Step3/#mirror-the-volume-from-moc-to-nerc","title":"Mirror the Volume from MOC to NERC","text":"
  1. Using the volume label from MinIO <MinIOVolume> and the <ContainerName> for the command below you will kick off the move of your volume. This takes around 30 sec per GB of data in your volume.
    $ mc mirror moc/<ContainerName>/<MinIOVolume> nerc/<ContainerName>/<MinIOVolume>\n...123a30e_sha256file:  2.61GB / 2.61GB [=========...=========] 42.15Mib/s 1m3s\n
"},{"location":"migration-moc-to-nerc/Step3/#copy-the-backup-record-from-moc-to-nerc","title":"Copy the Backup Record from MOC to NERC","text":"
  1. Now that we've copied the backup data into the NERC environment, we need to register the backup with the NERC backup service. We do this by copying metadata from MOC. You will need the original <MOCVolumeBackupID> you used to create the original Backup.

    openstack --os-cloud moc volume backup record export -f value <MOCVolumeBackupID> > record.txt\n
  2. Next we will import the record into NERC.

    openstack --os-cloud nerc volume backup record import -f value $(cat record.txt)\n<NERCVolumeBackupID>\nNone\n
    1. Copy <NERCVolumeBackupID> value into your table.
"},{"location":"migration-moc-to-nerc/Step3/#create-an-empty-volume-on-nerc-to-receive-the-backup","title":"Create an Empty Volume on NERC to Receive the Backup","text":"
  1. Create a volume in the NERC environment to receive the backup. This must be the same size as or larger than the original volume, which you can set by modifying the <size> field. Remove the \"--bootable\" flag if you are not creating a bootable volume. The <NERCVolumeName> field can be any name you want; I would suggest something that will help you keep track of which instance you want to attach it to. Make sure to fill in the table you created in Step 2 with the <NERCVolumeName> value in the NERC Volume Name column.
    openstack --os-cloud nerc volume create --bootable --size <size> <NERCVolumeName>\n+---------------------+----------------+\n| Field               | Value          |\n+---------------------+----------------+\n| attachments         | []             |\n| availability_zone   | nova           |\n...\n| id                  | <NERCVolumeID> |\n...\n| size                | <size>         |\n+---------------------+----------------+\n
"},{"location":"migration-moc-to-nerc/Step3/#restore-the-backup","title":"Restore the Backup","text":"
  1. Restore the Backup to the Volume you just created.

    openstack --os-cloud nerc volume backup restore <NERCVolumeBackupID> <NERCVolumeName>\n
  2. Wait for the volume to shift from restoring-backup to available.

    openstack --os-cloud nerc volume list\n+----------------+------------+------------------+------+-------------+\n| ID             | Name       | Status           | Size | Attached to |\n+----------------+------------+------------------+------+-------------+\n| <NERCVolumeID> | MOC Volume | restoring-backup |    3 | Migration   |\nopenstack --os-cloud nerc volume list\n+----------------+------------+-----------+------+-------------+\n| ID             | Name       | Status    | Size | Attached to |\n+----------------+------------+-----------+------+-------------+\n| <NERCVolumeID> | MOC Volume | available |    3 | Migration   |\n
  3. Repeat these Backup and Move Volume Data steps for each volume you need to migrate.

"},{"location":"migration-moc-to-nerc/Step3/#create-nerc-instances-using-moc-volumes","title":"Create NERC Instances Using MOC Volumes","text":"
  1. If you have volumes that need to be attached to an instance please follow the next steps.

  2. Follow the instructions here to set up your instance/s.

    1. Instead of using an Image for your Boot Source you will use a Volume (orange arrow in image below).

    2. Select the <NERCVolumeName> you created in the step Create an Empty Volume on NERC to Receive the Backup.

    3. The Flavor is important, as it decides how many vCPUs and how much RAM and Disk this instance will consume of your total.

      1. If for some reason the earlier approved resource quota is not sufficient you can request further quota by following these steps.
  3. Repeat this section for each instance you need to create.

"},{"location":"migration-moc-to-nerc/Step4/","title":"Remove Volume Backups to Conserve Storage","text":"

If you find yourself low on Volume Storage, please follow the steps below to remove your old Volume Backups. If you are very low on space, you can do this every time you finish copying a new volume to the NERC. If, on the other hand, you have plenty of remaining space, feel free to leave all of your Volume Backups as they are.

  1. SSH into the MirrorMOC2NERC Instance. The user to use for login is centos. If you have any trouble please review the SSH steps here.
"},{"location":"migration-moc-to-nerc/Step4/#check-remaining-moc-volume-storage","title":"Check Remaining MOC Volume Storage","text":"
  1. Log into the MOC Dashboard and go to Project > Compute > Overview.

  2. Look at the Volume Storage meter (highlighted in yellow in image above).

"},{"location":"migration-moc-to-nerc/Step4/#delete-moc-volume-backups","title":"Delete MOC Volume Backups","text":"
  1. Gather a list of current MOC Volume Backups with the command below.

    openstack --os-cloud moc volume backup list\n+---------------------+------+-------------+-----------+------+\n| ID                  | Name | Description | Status    | Size |\n+---------------------+------+-------------+-----------+------+\n| <MOCVolumeBackupID> | None | None        | available |   10 |\n
  2. Only remove Volume Backups you are sure have been moved to the NERC. You can delete Volume Backups with the command below.

    openstack --os-cloud moc volume backup delete <MOCVolumeBackupID>\n
  3. Repeat the MOC Volume Backup section for all MOC Volume Backups you wish to remove.

"},{"location":"migration-moc-to-nerc/Step4/#delete-moc-container-containername","title":"Delete MOC Container <ContainerName>","text":"

Remove the container, i.e. <ContainerName>, that was created with a unique name on the MOC side during migration. Replace the <ContainerName> field with the name of the container you created during the migration process:

openstack --os-cloud moc container delete --recursive <ContainerName>\n

Verify the <ContainerName> is removed from MOC:

openstack --os-cloud moc container list\n
"},{"location":"migration-moc-to-nerc/Step4/#check-remaining-nerc-volume-storage","title":"Check Remaining NERC Volume Storage","text":"
  1. Log into the NERC Dashboard and go to Project > Compute > Overview.

  2. Look at the Volume Storage meter (highlighted in yellow in image above).

"},{"location":"migration-moc-to-nerc/Step4/#delete-nerc-volume-backups","title":"Delete NERC Volume Backups","text":"
  1. Gather a list of current NERC Volume Backups with the command below.

    openstack --os-cloud nerc volume backup list\n+---------------------+------+-------------+-----------+------+\n| ID                  | Name | Description | Status    | Size |\n+---------------------+------+-------------+-----------+------+\n| <MOCVolumeBackupID> | None | None        | available |   3  |\n
  2. Only remove Volume Backups you are sure have been migrated to NERC Volumes. Keep in mind that you might not have named the volume the same as on the MOC so check your table from Step 2 to confirm. You can confirm what Volumes you have in NERC with the following command.

    openstack --os-cloud nerc volume list\n+----------------+------------------+--------+------+----------------------------------+\n| ID             | Name             | Status | Size | Attached to                      |\n+----------------+------------------+--------+------+----------------------------------+\n| <NERCVolumeID> | <NERCVolumeName> | in-use |    3 | Attached to MOC2NERC on /dev/vda |\n
  3. To remove volume backups please use the command below.

    openstack --os-cloud nerc volume backup delete <MOCVolumeBackupID>\n
  4. Repeat the NERC Volume Backup section for all NERC Volume Backups you wish to remove.

"},{"location":"migration-moc-to-nerc/Step4/#delete-nerc-container-containername","title":"Delete NERC Container <ContainerName>","text":"

Remove the container, i.e. <ContainerName>, that was created with a unique name on the NERC side during migration to mirror the volume from MOC to NERC. Replace the <ContainerName> field with the name of the container you created during the migration process:

openstack --os-cloud nerc container delete --recursive <ContainerName>\n

Verify the <ContainerName> is removed from NERC:

openstack --os-cloud nerc container list\n
"},{"location":"openshift/","title":"OpenShift Tutorial Index","text":"

If you're just starting out, we recommend starting from OpenShift Overview and going through the tutorial in order.

If you just need to review a specific step, you can find the page you need in the list below.

"},{"location":"openshift/#openshift-getting-started","title":"OpenShift Getting Started","text":"
  • OpenShift Overview <<-- Start Here
"},{"location":"openshift/#openshift-web-console","title":"OpenShift Web Console","text":"
  • Access the NERC's OpenShift Web Console
  • Web Console Overview
"},{"location":"openshift/#openshift-command-line-interface-cli-tools","title":"OpenShift command-line interface (CLI) Tools","text":"
  • OpenShift CLI Tools Overview
  • How to Setup the OpenShift CLI Tools
"},{"location":"openshift/#creating-your-first-application-on-openshift","title":"Creating Your First Application on OpenShift","text":"
  • Creating A Sample Application

  • Creating Your Own Developer Catalog Service

"},{"location":"openshift/#editing-applications","title":"Editing Applications","text":"
  • Editing your applications

  • Scaling and Performance Guide

"},{"location":"openshift/#storage","title":"Storage","text":"
  • Storage Overview
"},{"location":"openshift/#deleting-applications","title":"Deleting Applications","text":"
  • Deleting your applications
"},{"location":"openshift/#decommission-openshift-resources","title":"Decommission OpenShift Resources","text":"
  • Decommission OpenShift Resources
"},{"location":"openshift/applications/creating-a-sample-application/","title":"Creating A Sample Application","text":"

NERC's OpenShift service is a platform that provides a cloud-native environment for developing and deploying applications.

Here, we walk through the process of creating a simple web application and deploying it. This example uses the Node.js programming language, but the process with other programming languages will be similar. The instructions provided show the tasks using both the web console and the command-line tool.

"},{"location":"openshift/applications/creating-a-sample-application/#using-the-developer-perspective-on-nercs-openshift-web-console","title":"Using the Developer perspective on NERC's OpenShift Web Console","text":"
  1. Go to the NERC's OpenShift Web Console.

  2. Click on the Perspective Switcher drop-down menu and select Developer.

  3. In the Navigation Menu, click +Add.

  4. Creating applications using samples: Use existing code samples to get started with creating applications on the OpenShift Container Platform. Find the Create applications using samples section, click \"View all samples\", and select the type of application you want to create (e.g. Node.js, Python, Ruby, etc.). This loads the application from a Git Repo URL; review or modify the application Name for your application. Alternatively, if you want to create an application from your own source code located in a git repository, select Import from Git. In the Git Repo URL text box, enter your git repo url. For example: https://github.com/myuser/mypublicrepo.git. You may see a warning stating \"URL is valid but cannot be reached\". You can ignore this warning!

  5. Click \"Create\" to create your application.

  6. Once your application has been created, you can view the details by clicking on the application name in the Project Overview page.

  7. On the Topology View menu, click on your application, or the application circle if you are in graphical topology view. In the details panel that displays, scroll to the Routes section on the Resources tab and click on the link to go to the sample application. This will open your application in a new browser window. The link will look similar to http://<appname>-<mynamespace>.apps.shift.nerc.mghpcc.org.

Example: Deploying a Python application

For a quick example on how to use the \"Import from Git\" option to deploy a sample Python application, please refer to this guide.

"},{"location":"openshift/applications/creating-a-sample-application/#additional-resources","title":"Additional resources","text":"

For more options and customization please read this.

"},{"location":"openshift/applications/creating-a-sample-application/#using-the-cli-oc-command-on-your-local-terminal","title":"Using the CLI (oc command) on your local terminal","text":"

Alternatively, you can create an application on the NERC's OpenShift cluster by using the oc new-app command from the command line terminal.

i. Make sure you have the oc CLI tool installed and configured on your local machine following these steps.

Information

Some users may have access to multiple projects. Run the following command to switch to a specific project space: oc project <your-project-namespace>.

ii. To create an application, you will need to specify the language and runtime for your application. You can do this by using the oc new-app command and specifying a language and runtime. For example, to create a Node.js application, you can run the following command: oc new-app nodejs

iii. If you want to create an application from an existing Git repository, you can use the --code flag to specify the URL of the repository. For example: oc new-app --code https://github.com/myuser/mypublicrepo. If you want to use a different name, you can add the --name=<newname> argument to the oc new-app command. For example: oc new-app --name=mytestapp https://github.com/myuser/mypublicrepo. The platform will try to automatically detect the programming language of the application code and select the latest version of the base language image available. If oc new-app can't find any suitable Source-To-Image (S2I) builder images based on the source code in your Git repository, is unable to detect the programming language, or detects the wrong one, you can always specify the image you want to use as part of the new-app argument, with oc new-app <image url>~<git url>. For example, if we are using a test application based on Node.js, we could use the same command as before but add nodejs~ before the URL of the Git repository: oc new-app nodejs~https://github.com/myuser/mypublicrepo.

Important Note

If you are using a private remote Git repository, you can use the --source-secret flag to specify an existing source clone secret that will get injected into your BuildConfig to access the repository. For example: oc new-app https://github.com/myuser/yourprivaterepo --source-secret=yoursecret.

iv. Once your application has been created, you can run oc status to see if your application was successfully built and deployed. Builds and deployments can sometimes take several minutes to complete, so you may need to run this several times. You can then view the details by running the oc get pods command. This will show you a list of all the pods running in your project, including the pod for your new application.

v. When using the oc command-line tool to create an application, a route is not automatically set up to make your application web accessible. Run the following to make the test application web accessible: oc create route edge --service=mytestapp --insecure-policy=Redirect. Once the application is deployed and the route is set up, it can be accessed at a web URL similar to http://mytestapp-<mynamespace>.apps.shift.nerc.mghpcc.org. A consolidated sketch of these CLI steps is shown below.
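Putting steps i. through v. together, a minimal end-to-end sketch looks like the following; the application name and repository URL are the same illustrative placeholders used above:

oc new-app --name=mytestapp nodejs~https://github.com/myuser/mypublicrepo\noc status\noc get pods\noc create route edge --service=mytestapp --insecure-policy=Redirect\noc get route mytestapp\n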

"},{"location":"openshift/applications/creating-a-sample-application/#for-more-additional-resources","title":"For more additional resources","text":"

For more options and customization please read this.

"},{"location":"openshift/applications/creating-a-sample-application/#using-the-developer-catalog-on-nercs-openshift-web-console","title":"Using the Developer Catalog on NERC's OpenShift Web Console","text":"

The Developer Catalog offers a streamlined process for deploying applications and services supported by Operator-backed services like CI/CD, Databases, Builder Images, and Helm Charts. It comprises a diverse array of application components, services, event sources, and source-to-image builders ready for integration into your project.

About Quick Start Templates

By default, the templates build using a public source repository on GitHub that contains the necessary application code. For more options and customization please read this.

"},{"location":"openshift/applications/creating-a-sample-application/#steps","title":"Steps","text":"
  1. Go to the NERC's OpenShift Web Console.

  2. Click on the Perspective Switcher drop-down menu and select Developer.

  3. In the Navigation Menu, click +Add.

  4. You need to find the Developer Catalog section and then select All services option as shown below:

  5. Then, you will be able to search for any available services from the Developer Catalog templates in the catalog and choose the desired type of service or component that you wish to include in your project. For this example, select Databases to list all the database services and then click MariaDB to see the details for the service.

    To Create Your Own Developer Catalog Service

    You also have the option to create and integrate custom services into the Developer Catalog using a template, as described here.

  6. Once selected by clicking the template, you will see Instantiate Template web interface as shown below:

  7. Clicking \"Instantiate Template\" will display an automatically populated template containing details for the MariaDB service. Click \"Create\" to begin the creation process and enter any custom information required.

  8. View the MariaDB service in the Topology view as shown below:

"},{"location":"openshift/applications/creating-a-sample-application/#for-additional-resources","title":"For Additional resources","text":"

For more options and customization please read this.

"},{"location":"openshift/applications/creating-your-own-developer-catalog-service/","title":"Creating Your Own Developer Catalog Service","text":"

Here, we walk through the process of creating a simple RStudio web server template that bundles all resources required to run the server, i.e. ConfigMap, Pod, Route, Service, etc., and then initiating and deploying an application from that template.

This example template file is readily accessible from the Git Repository.

More about Writing Templates

For more options and customization please read this.

  1. Find the From Local Machine section and click on Import YAML as shown below:

  2. In the opened YAML editor, paste the contents of the template copied from the rstudio-server-template.yaml file located at the provided Git Repo.

  3. You need to find the Developer Catalog section and then select All services option as shown below:

  4. Then, you will be able to use the created Developer Catalog template by searching for \"RStudio\" in the catalog as shown below:

  5. Once selected by clicking the template, you will see Instantiate Template web interface as shown below:

  6. Our template definition asks users to input a preferred password for the RStudio server, so the following interface will prompt for the password that will be used to log in to the RStudio server.

  7. Once the application is successfully initiated, you can either open the application URL using the Open URL icon as shown below, or you can navigate to the Routes section and click on the Location path as shown below:

  8. To get the Username to be used for login on the RStudio server, you need to click on the running pod, i.e. rstudio-server, as shown below:

  9. Then select the YAML section to find the value of the runAsUser attribute, which is used as the Username when signing in to the RStudio server as shown below:

  10. Finally, you will be able to see the RStudio web interface!

Modifying uploaded templates

You can edit a template that has already been uploaded to your project: oc edit template <template>
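Alternatively, instead of using Import YAML in the web console, the same template can be uploaded from the CLI. A minimal sketch, assuming you have downloaded rstudio-server-template.yaml from the Git repository to your working directory:

oc create -f rstudio-server-template.yaml\noc get templates\n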

"},{"location":"openshift/applications/deleting-applications/","title":"Deleting your applications","text":""},{"location":"openshift/applications/deleting-applications/#deleting-applications-using-the-developer-perspective-on-nercs-openshift-web-console","title":"Deleting applications using the Developer perspective on NERC's OpenShift Web Console","text":"

You can delete applications created in your project by using the Developer perspective as follows:

To delete an application and all of its associated components using the Topology view menu in the Developer perspective:

  1. Go to the NERC's OpenShift Web Console.

  2. Click on the Perspective Switcher drop-down menu and select Developer.

  3. Click the application you want to delete to see the side panel with the resource details of the application.

  4. Click the Actions drop-down menu displayed on the upper right of the panel, and select Delete Application to see a confirmation dialog box as shown below:

  5. Enter the name of the application and click Delete to delete it.

Or, if you are using Graph view then you can also right-click the application you want to delete and click Delete Application to delete it as shown below:

"},{"location":"openshift/applications/deleting-applications/#deleting-applications-using-the-oc-command-on-your-local-terminal","title":"Deleting applications using the oc command on your local terminal","text":"

Alternatively, you can delete the resource objects by using the oc delete command from the command line terminal. Make sure you have the oc CLI tool installed and configured on your local machine following these steps.

How to select resource object?

You can delete a single resource object by name, or delete a set of resource objects by specifying a label selector.

When an application is deployed, resource objects for that application will typically have an app label applied to them with value corresponding to the name of the application. This can be used with the label selector to delete all resource objects for an application.

To test what resource objects would be deleted when using a label selector, use the oc get command to query the set of objects which would be matched.

oc get all --selector app=<application-name> -o name

For example:

oc get all --selector app=rstudio-server -o name\npod/rstudio-server\nservice/rstudio-server\nroute.route.openshift.io/rstudio-server\n

If you are satisfied that what is shown are the resource objects for your application, then run oc delete.

oc delete all --selector app=<application-name>

Important Note

The all selector matches only a subset of all the resource object types that exist. It targets the core resource objects that would be created for a build and deployment. It will not include resource objects such as persistent volume claims (pvc), config maps (configmap), secrets (secret), and others.

You will either need to delete these resource objects separately, or if they also have been labelled with the app tag, list the resource object types along with all.

oc delete all,configmap,pvc,serviceaccount,rolebinding --selector app=<application-name>

If you are not sure what labels have been applied to resource objects for your application, you can run oc describe on the resource object to see the labels applied to it. For example:

oc describe pod/rstudio-server\nName:         rstudio-server\nNamespace:    64b664c37f2a47c39c3cf3942ff4d0be\nPriority:     0\nNode:         wrk-11/10.30.6.21\nStart Time:   Fri, 16 Dec 2022 10:59:23 -0500\nLabels:       app=rstudio-server\n            template.openshift.io/template-instance-owner=44a3fae8-4e8e-4058-a4a8-0af7bbb41f6\n...\n

Important Note

It is important to check what labels have been used with your application if you have created it using a template, as templates may not follow the convention of using the app label.
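One quick way to review the labels attached to all of the resource objects in your project is the --show-labels flag of oc get, for example:

oc get all --show-labels\n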

"},{"location":"openshift/applications/editing-applications/","title":"Editing applications","text":"

You can edit the configuration and the source code of the application you create using the Topology view.

"},{"location":"openshift/applications/editing-applications/#editing-the-source-code-of-an-application-using-the-developer-perspective","title":"Editing the source code of an application using the Developer perspective","text":"

You can click the \"Edit Source Code\" icon, displayed at the bottom-right of the deployed application, to access your source code and modify it as shown below:

Information

This feature is available only when you create applications using the From Git, Container Image, From Catalog, and From Dockerfile options.

"},{"location":"openshift/applications/editing-applications/#editing-the-application-configuration-using-the-developer-perspective","title":"Editing the application configuration using the Developer perspective","text":"
  1. In the Topology view, right-click the application to see the edit options available as shown below:

    Or, in the Topology view, click the deployed application to reveal the right-side Overview panel. From the Actions drop-down list, we can see similar edit options available as shown below:

  2. Click on any of the options available to edit the resource used by your application; the pop-up form will be pre-populated with the values you added while creating the application.

  3. Click Save to restart the build and deploy a new image.

"},{"location":"openshift/applications/scaling-and-performance-guide/","title":"Scaling and Performance Guide","text":""},{"location":"openshift/applications/scaling-and-performance-guide/#understanding-pod","title":"Understanding Pod","text":"

Pods serve as the smallest unit of compute that can be defined, deployed, and managed within the OpenShift Container Platform (OCP). The OCP utilizes the Kubernetes concept of a pod, which consists of one or more containers deployed together on a single host.

Pods are essentially the building blocks of a Kubernetes cluster, analogous to a machine instance (either physical or virtual) for a container. Each pod is assigned its own internal IP address, granting it complete ownership over its port space. Additionally, containers within a pod can share local storage and network resources.

The lifecycle of a pod typically involves several stages: first, the pod is defined; then, it is scheduled to run on a node within the cluster; finally, it runs until its container(s) exit or until it is removed due to some other circumstance. Depending on the cluster's policy and the exit code of its containers, pods may be removed after exiting, or they may be retained to allow access to their container logs.

"},{"location":"openshift/applications/scaling-and-performance-guide/#example-pod-configurations","title":"Example pod configurations","text":"

The following is an example definition of a pod from a Rails application. It demonstrates many features of pods, most of which are discussed in other topics and thus only briefly mentioned here:
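A simplified, hypothetical stand-in for that definition (names and image are illustrative only, not the original Rails manifest) is sketched below; its comments indicate where each of the numbered features discussed next appears:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: example-app-pod\n  labels:\n    app: example-app            # (1) labels in key/value format under metadata\nspec:\n  restartPolicy: Always         # (2) restart policy: Always, OnFailure, or Never\n  securityContext:              # (3) security context; the default is very restrictive\n    runAsNonRoot: true\n  serviceAccountName: default   # (8) service account used for OpenShift API requests\n  containers:                   # (4) array of one or more container definitions\n    - name: example-app\n      image: registry.access.redhat.com/ubi8/httpd-24   # (7) each container has its own image\n      ports:\n        - containerPort: 8080\n      volumeMounts:             # (5) where volumes are mounted inside the container\n        - name: scratch\n          mountPath: /opt/app-root/scratch\n  volumes:                      # (6, 9) volumes the pod provides to its containers\n    - name: scratch\n      emptyDir: {}\n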

  1. Pods can be \"tagged\" with one or more labels, which can then be used to select and manage groups of pods in a single operation. The labels are stored in key/value format in the metadata hash.

  2. The pod restart policy with possible values Always, OnFailure, and Never. The default value is Always. Read this to learn about \"Configuring how pods behave after restart\".

  3. OpenShift Container Platform defines a security context for containers which specifies whether they are allowed to run as privileged containers, run as a user of their choice, and more. The default context is very restrictive but administrators can modify this as needed.

  4. containers specifies an array of one or more container definitions.

  5. The container specifies where external storage volumes are mounted within the container. In this case, there is a volume for storing access to credentials the registry needs for making requests against the OpenShift Container Platform API.

  6. Specify the volumes to provide for the pod. Volumes mount at the specified path. Do not mount to the container root, /, or any path that is the same in the host and the container. This can corrupt your host system if the container is sufficiently privileged, such as the host /dev/pts files. It is safe to mount the host by using /host.

  7. Each container in the pod is instantiated from its own container image.

  8. Pods making requests against the OpenShift Container Platform API is a common enough pattern that there is a serviceAccount field for specifying which service account user the pod should authenticate as when making the requests. This enables fine-grained access control for custom infrastructure components.

  9. The pod defines storage volumes that are available to its container(s) to use. In this case, it provides an ephemeral volume for a secret volume containing the default service account tokens. If you attach persistent volumes that have high file counts to pods, those pods can fail or can take a long time to start.

Viewing pods

You can refer to this user guide on how to view all pods, their usage statistics (i.e. CPU, memory, and storage consumption) and logs in your project using the OpenShift CLI (oc) commands.

"},{"location":"openshift/applications/scaling-and-performance-guide/#compute-resources","title":"Compute Resources","text":"

Each container running on a node consumes compute resources, which are measurable quantities that can be requested, allocated, and consumed.

When authoring a pod configuration YAML file, you can optionally specify how much CPU, memory (RAM), and local ephemeral storage each container needs in order to better schedule pods in the cluster and ensure satisfactory performance as shown below:
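For example, a container declares its compute resources under resources.requests and resources.limits in its definition; a minimal fragment with illustrative values looks like this:

resources:\n  requests:\n    cpu: 250m\n    memory: 64Mi\n    ephemeral-storage: 1Gi\n  limits:\n    cpu: 500m\n    memory: 128Mi\n    ephemeral-storage: 2Gi\n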

CPU and memory can be specified in a couple of ways:

  • Resource requests and limits are optional parameters specified at the container level. OpenShift computes a Pod's request and limit as the sum of requests and limits across all of its containers. OpenShift then uses these parameters for scheduling and resource allocation decisions.

    The request value specifies the min value you will be guaranteed. The request value is also used by the scheduler to assign pods to nodes.

    Pods will get the amount of memory they request. If they exceed their memory request, they could be killed if another pod happens to need this memory. Pods are only ever killed while using less memory than requested if critical system or high-priority workloads need the memory.

    Likewise, each container within a Pod is granted the CPU resources it requests, subject to availability. Additional CPU cycles may be allocated if resources are available and not required by other active Pods/Jobs.

    Important Information

    If a Pod's total requests are not available on a single node, then the Pod will remain in a Pending state (i.e. not running) until these resources become available.

  • The limit value specifies the max value you can consume. Limit is the value applications should be tuned to use. Pods will be throttled on memory and CPU when they exceed their available memory and CPU limits.

CPU is measured in units called millicores, where 1000 millicores (\"m\") = 1 vCPU or 1 Core. Each node in a cluster inspects the operating system to determine the amount of CPU cores on the node, then multiplies that value by 1000 to express its total capacity. For example, if a node has 2 cores, the node's CPU capacity would be represented as 2000m. If you wanted to use 1/10 of a single core, it would be represented as 100m.

Memory and ephemeral storage are measured in bytes. In addition, they may be specified with SI suffixes (E, P, T, G, M, K) or their power-of-two equivalents (Ei, Pi, Ti, Gi, Mi, Ki).

What happens if I do not specify the Compute Resources in the Pod YAML?

If you don't specify the compute resources for your objects, i.e. containers, then to restrict them from running with unbounded compute resources on our cluster, the objects will use the limit ranges specified for your project namespace. With limit ranges, we restrict resource consumption for specific objects in a project. You can also view the current limit range for your project by going into the Administrator perspective and then navigating into the \"LimitRange details\" as shown below:
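You can also inspect the same limit ranges from the CLI, for example:

oc describe limitrange -n <your-project-namespace>\n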

"},{"location":"openshift/applications/scaling-and-performance-guide/#how-to-specify-pod-to-use-gpu","title":"How to specify pod to use GPU?","text":"

So from a Developer perspective, the only thing you have to worry about is asking for GPU resources when defining your pods, with something like:

spec:\n  containers:\n  - name: app\n    image: ...\n    resources:\n      requests:\n        memory: \"64Mi\"\n        cpu: \"250m\"\n        nvidia.com/gpu: 1\n      limits:\n        memory: \"128Mi\"\n        cpu: \"500m\"\n

In the sample Pod Spec above, you can allocate GPUs to pods by specifying the GPU resource nvidia.com/gpu and indicating the desired number of GPUs. This number should not exceed the GPU quota specified by the value of the \"OpenShift Request on GPU Quota\" attribute that has been approved for your \"NERC-OCP (OpenShift)\" resource allocation on NERC's ColdFront as described here.

If you need to increase this quota value, you can request a change as explained here.

The \"resources\" section under \"containers\" with the nvidia.com/gpu specification indicates the number of GPUs you want in this container. Below is an example of a running pod YAML that requests the GPU device with a count of 2:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: gpu-pod\nspec:\n  restartPolicy: Never\n  containers:\n    - name: cuda-container\n      image: nvcr.io/nvidia/k8s/cuda-sample:vectoradd-cuda10.2\n      command: [\"sleep\"]\n      args: [\"infinity\"]\n      resources:\n        limits:\n          nvidia.com/gpu: 2\n  nodeSelector:\n    nvidia.com/gpu.product: NVIDIA-A100-SXM4-40GB\n

In the opened YAML editor, paste the contents of the pod YAML given above, as shown below:

After the pod is running, navigate to the pod details and execute the following command in the Terminal to view the currently available NVIDIA GPU devices:
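Based on the surrounding steps, the command in question is nvidia-smi, which lists the GPU devices visible inside the container:

nvidia-smi\n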

Additionally, you can execute the following command to narrow down and retrieve the name of the GPU device:

nvidia-smi --query-gpu=gpu_name --format=csv,noheader --id=0 | sed -e 's/ /-/g'\n\nNVIDIA-A100-SXM4-40GB\n
"},{"location":"openshift/applications/scaling-and-performance-guide/#how-to-select-a-different-gpu-device","title":"How to select a different GPU device?","text":"

We can specify information about the GPU product type, family, count, and so on, as shown in the Pod Spec above. These node labels can also be used in the Pod Spec to schedule workloads based on criteria such as the GPU device name, under nodeSelector as shown below:

apiVersion: v1\nkind: Pod\nmetadata:\n  name: gpu-pod2\nspec:\n  restartPolicy: Never\n  containers:\n    - name: cuda-container\n      image: nvcr.io/nvidia/k8s/cuda-sample:vectoradd-cuda10.2\n      command: [\"sleep\"]\n      args: [\"infinity\"]\n      resources:\n        limits:\n          nvidia.com/gpu: 1\n  nodeSelector:\n    nvidia.com/gpu.product: Tesla-V100-PCIE-32GB\n

When you run the nvidia-smi command in the terminal, you can observe the availability of the different V100 NVIDIA GPU device, as shown below:

"},{"location":"openshift/applications/scaling-and-performance-guide/#scaling","title":"Scaling","text":"

Scaling defines the number of pods or instances of the application you want to deploy. Bare pods not managed by a replication controller will not be rescheduled in the event of a node disruption. You can deploy your application using Deployment or Deployment Config objects to maintain the desired number of healthy pods and manage them from the web console. You can create deployment strategies that help reduce downtime during a change or an upgrade to the application. For more information about deployment, please read this.

Benefits of Scaling

This will allow for a quicker response to peaks in demand, and reduce costs by automatically scaling down when resources are no longer needed.

"},{"location":"openshift/applications/scaling-and-performance-guide/#scaling-application-pods-resources-and-observability","title":"Scaling application pods, resources and observability","text":"

The Topology view provides the details of the deployed components in the Overview panel. You can use the Details, Resources and Observe tabs to scale the application pods, check build status, services, routes, metrics, and events as follows:

Click on the component node to see the Overview panel to the right.

Use the Details tab to:

  • Scale your pods using the up and down arrows to increase or decrease the number of pods or instances of the application manually as shown below:

    Alternatively, we can easily configure and modify the pod count by right-clicking the application to see the edit options available and selecting Edit Pod Count as shown below:

  • Check the Labels, Annotations, and Status of the application.

Click the Resources tab to:

  • See the list of all the pods, view their status, access logs, and click on the pod to see the pod details.

  • See the builds, their status, access logs, and start a new build if needed.

  • See the services and routes used by the component.

Click the Observe tab to:

  • See the metrics to see CPU usage, Memory usage and Bandwidth consumption.

  • See the Events.

    Detailed Monitoring your project and application metrics

    On the left navigation panel of the Developer perspective, click Observe to see the Dashboard, Metrics, Alerts, and Events for your project. For more information about Monitoring project and application metrics using the Developer perspective, please read this.

"},{"location":"openshift/applications/scaling-and-performance-guide/#scaling-manually","title":"Scaling manually","text":"

To manually scale a DeploymentConfig object, use the oc scale command.

oc scale dc <dc_name> --replicas=<replica_count>\n

For example, the following command sets the replicas in the frontend DeploymentConfig object to 3.

oc scale dc frontend --replicas=3\n

The number of replicas eventually propagates to the desired and current state of the deployment configured by the DeploymentConfig object frontend.

Scaling applications based on a schedule (Cron)

You can also integrate schedule-based scaling using OpenShift/Kubernetes native resources called CronJobs, which execute a task periodically (date + time) written in Cron format. For example, scaling an app up to 5 replicas at 0900 and then scaling it down to 1 pod at 2359. To learn more about this, please refer to this blog post.

"},{"location":"openshift/applications/scaling-and-performance-guide/#autoscaling","title":"AutoScaling","text":"

We can configure automatic scaling, or autoscaling, for applications to match incoming demand. This feature automatically adjusts the scale of a replication controller or deployment configuration based on metrics collected from the pods belonging to that replication controller or deployment configuration. You can create a Horizontal Pod Autoscaler (HPA) for any deployment, deployment config, replica set, replication controller, or stateful set.

For instance, if an application receives no traffic, it is scaled down to the minimum number of replicas configured for the application. Conversely, replicas can be scaled up to meet demand if traffic to the application increases.

"},{"location":"openshift/applications/scaling-and-performance-guide/#understanding-horizontal-pod-autoscalers-hpa","title":"Understanding Horizontal Pod Autoscalers (HPA)","text":"

You can create a horizontal pod autoscaler to specify the minimum and maximum number of pods you want to run, as well as the CPU utilization or memory utilization your pods should target.

  • CPU Utilization: Number of CPU cores used. Can be used to calculate a percentage of the pod's requested CPU.
  • Memory Utilization: Amount of memory used. Can be used to calculate a percentage of the pod's requested memory.

After you create a horizontal pod autoscaler, OCP begins to query the CPU and/or memory resource metrics on the pods. When these metrics are available, the HPA computes the ratio of the current metric utilization to the desired metric utilization, and scales up or down accordingly. The query and scaling occur at a regular interval, but it can take one to two minutes before metrics become available.

For replication controllers, this scaling corresponds directly to the replicas of the replication controller. For deployment configurations, scaling corresponds directly to the replica count of the deployment configuration. Note that autoscaling applies only to the latest deployment in the Complete phase.

For more information on how the HPA works, read this documentation.
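In addition to the web console workflow described below, an HPA can also be managed from the CLI. A minimal sketch with illustrative flag values:

oc autoscale deployment/<deployment-name> --min=1 --max=5 --cpu-percent=75\noc get hpa\noc delete hpa <hpa-name>\n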

Very Important Note

To implement the HPA, all targeted pods must have a Resource limits set on their containers. The HPA will not have CPU and Memory metrics until Resource limits are set. CPU request and limit must be set before CPU utilization can be set. Memory request and limit must be set before Memory utilization can be set.

"},{"location":"openshift/applications/scaling-and-performance-guide/#resource-limit","title":"Resource Limit","text":"

Resource limits control how much CPU and memory a container will consume on a node. You can specify a limit on how much memory and CPU a container can consume in both request and limit values. You can also specify the min request and max limit of a given container, as well as the max ratio between request and limit. We can easily configure and modify the resource limits by right-clicking the application to see the edit options available as shown below:

Then select the Edit resource limits link to set the amount of CPU and Memory resources a container is guaranteed or allowed to use when running. In the pod specifications, you must specify the resource requests, such as CPU and memory, as described here.

The HPA uses this specification to determine the resource utilization and then scales the target up or down. Utilization values are calculated as a percentage of the resource requests of each pod. Missing resource request values can affect the optimal performance of the HPA.

"},{"location":"openshift/applications/scaling-and-performance-guide/#creating-a-horizontal-pod-autoscaler-by-using-the-web-console","title":"Creating a horizontal pod autoscaler by using the web console","text":"

From the web console, you can create a HPA that specifies the minimum and maximum number of pods you want to run on a Deployment or DeploymentConfig object. You can also define the amount of CPU or memory usage that your pods should target. The HPA increases and decreases the number of replicas between the minimum and maximum numbers to maintain the specified CPU utilization across all pods.

"},{"location":"openshift/applications/scaling-and-performance-guide/#to-create-an-hpa-in-the-web-console","title":"To create an HPA in the web console","text":"
  • In the Topology view, click the node to reveal the side pane.

  • From the Actions drop-down list, select Add HorizontalPodAutoscaler as shown below:

  • This will open the Add HorizontalPodAutoscaler form as shown below:

    Configure via: Form or YAML View

    While creating or editing the horizontal pod autoscaler in the web console, you can switch from Form view to YAML view.

  • From the Add HorizontalPodAutoscaler form, define the name, minimum and maximum pod limits, the CPU and memory usage, and click Save.

"},{"location":"openshift/applications/scaling-and-performance-guide/#to-edit-an-hpa-in-the-web-console","title":"To edit an HPA in the web console","text":"
  • In the Topology view, click the node to reveal the side pane.

  • From the Actions drop-down list, select Edit HorizontalPodAutoscaler to open the Edit Horizontal Pod Autoscaler form.

  • From the Edit Horizontal Pod Autoscaler form, edit the minimum and maximum pod limits and the CPU and memory usage, and click Save.

"},{"location":"openshift/applications/scaling-and-performance-guide/#to-remove-an-hpa-in-the-web-console","title":"To remove an HPA in the web console","text":"
  • In the Topology view, click the node to reveal the side panel.

  • From the Actions drop-down list, select Remove HorizontalPodAutoscaler.

  • In the confirmation pop-up window, click Remove to remove the HPA.

Best Practices

Read this document to learn more about best practices regarding Horizontal Pod Autoscaler (HPA) autoscaling.

"},{"location":"openshift/decommission/decommission-openshift-resources/","title":"Decommission OpenShift Resources","text":"

You can decommission all of your NERC OpenShift resources sequentially as outlined below.

"},{"location":"openshift/decommission/decommission-openshift-resources/#prerequisite","title":"Prerequisite","text":"
  • Backup: Back up any critical data or configurations stored on the resources that are going to be decommissioned. This ensures that important information is not lost during the process.

  • Kubernetes Objects (Resources): Please review all OpenShift Kubernetes Objects (Resources) to ensure they are not actively used and ready to be decommissioned.

  • Install and configure the OpenShift CLI (oc), see How to Setup the OpenShift CLI Tools for more information.

"},{"location":"openshift/decommission/decommission-openshift-resources/#delete-all-data-science-project-resources-from-the-nercs-red-hat-openshift-ai","title":"Delete all Data Science Project resources from the NERC's Red Hat OpenShift AI","text":"

Navigate to the NERC's Red Hat OpenShift AI (RHOAI) dashboard from the NERC's OpenShift Web Console via the web browser as described here.

Once you gain access to the NERC's RHOAI dashboard, you can click on specific Data Science Project (DSP) corresponding to the appropriate allocation of resources you want to clean up, as described here.

The NERC RHOAI dashboard will look like the one shown below, displaying all consumed resources:

"},{"location":"openshift/decommission/decommission-openshift-resources/#delete-all-workbenches","title":"Delete all Workbenches","text":"

Delete all workbenches by clicking on the three dots on the right side of the individual workbench and selecting Delete workbench, as shown below:

When prompted, please confirm your workbench name and then click the \"Delete workbench\" button as shown below:

"},{"location":"openshift/decommission/decommission-openshift-resources/#delete-all-cluster-storage","title":"Delete all Cluster Storage","text":"

Delete all cluster storage by clicking on the three dots on the right side of the individual cluster storage and selecting Delete storage, as shown below:

When prompted, please confirm your cluster storage name and then click the \"Delete storage\" button as shown below:

"},{"location":"openshift/decommission/decommission-openshift-resources/#delete-all-data-connections","title":"Delete all Data connections","text":"

Delete all data connections by clicking on the three dots on the right side of the individual data connection and selecting Delete data connection, as shown below:

When prompted, please confirm your data connection name and then click the \"Delete data connection\" button as shown below:

"},{"location":"openshift/decommission/decommission-openshift-resources/#delete-all-pipelines","title":"Delete all Pipelines","text":"

Delete all pipelines by clicking on the three dots on the right side of the individual pipeline and selecting Delete pipeline, as shown below:

When prompted, please confirm your pipeline name and then click the \"Delete pipeline\" button as shown below:

"},{"location":"openshift/decommission/decommission-openshift-resources/#delete-all-models-and-model-servers","title":"Delete all Models and Model Servers","text":"

Delete all model servers by clicking on the three dots on the right side of the individual model server and selecting Delete model server, as shown below:

When prompted, please confirm your model server name and then click the \"Delete model server\" button as shown below:

Important Note

Deleting Model Server will automatically delete ALL Models deployed on the model server.

Finally, the NERC RHOAI dashboard will look clean and empty without any resources, as shown below:

Now, you can return to \"OpenShift Web Console\" by using the application launcher icon (the black-and-white icon that looks like a grid), and choosing the \"OpenShift Console\" as shown below:

"},{"location":"openshift/decommission/decommission-openshift-resources/#delete-all-resources-from-the-nerc-openshift","title":"Delete all resources from the NERC OpenShift","text":"

Run oc login in your local machine's terminal using your own token to authenticate and access all your projects on the NERC OpenShift as described here. Please ensure you have already selected the correct project that needs to be decommissioned, as shown below:

oc login --token=<your_token> --server=https://api.shift.nerc.mghpcc.org:6443\nLogged into \"https://api.shift.nerc.mghpcc.org:6443\" as \"test1_user@fas.harvard.edu\" using the token provided.\n\nYou have access to the following projects and can switch between them with 'oc project <projectname>':\n\n    test-project-1\n* test-project-2\n    test-project-3\n\nUsing project \"test-project-2\".\n

Switch to the project that needs to be decommissioned by running the oc project <projectname> command:

oc project <your_openshift_project_to_decommission>\nUsing project \"<your_openshift_project_to_decommission>\" on server \"https://api.shift.nerc.mghpcc.org:6443\".\n

Please confirm the correct project is being selected by running oc project, as shown below:

oc project\nUsing project \"<your_openshift_project_to_decommission>\" on server \"https://api.shift.nerc.mghpcc.org:6443\".\n

Important Note: Best Practice for Specifying Namespace in oc Commands.

The best practice is to specify the namespace in each oc command using the -n option, e.g., -n <your_openshift_project_to_decommission>. This ensures that your commands are always executed in the intended project, minimizing the risk of affecting the wrong resources.

For example, the oc get all command can also be executed by specifying the namespace using the -n option, like this: oc get all -n <your_openshift_project_to_decommission>.

Please review all resources currently being used by your project by running oc get all, as shown below:

oc get all\n\nNAME                                                                  READY   STATUS             RESTARTS       AGE\npod/ds-pipeline-persistenceagent-pipelines-definition-868665f7z9lpm   1/1     Running            0              141m\n...\n\nNAME                                       TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                               AGE\nservice/ds-pipeline-pipelines-definition   ClusterIP   172.30.133.168   <none>        8443/TCP,8888/TCP,8887/TCP            141m\n...\n\nNAME                                                                 READY   UP-TO-DATE   AVAILABLE   AGE\ndeployment.apps/ds-pipeline-persistenceagent-pipelines-definition    1/1     1            1           141m\n...\n\nNAME                                                                            DESIRED   CURRENT   READY   AGE\nreplicaset.apps/ds-pipeline-persistenceagent-pipelines-definition-868665f748    1         1         1       141m\n...\n\nNAME                                                 IMAGE REPOSITORY\n                                                TAGS   UPDATED\nimagestream.image.openshift.io/simple-node-app-git   image-registry.openshift-image-registry.svc:5000/test-project-gpu-dc1e23/simple-node-app-git\n\nNAME                                                        HOST/PORT\n                                                PATH   SERVICES                           PORT            TERMINATION          WILDCARD\nroute.route.openshift.io/ds-pipeline-pipelines-definition   ds-pipeline-pipelines-definition-test-project-gpu-dc1e23.apps.shift.nerc.mghpcc.org          ds-pipeline-pipelines-definition   oauth           reencrypt/Redirect   None\n...\n

To list all Resources with their Names only.

To list all resources with their names only, you can run this command: oc get all -oname.

Here, the -oname flag specifies the output format. In this case, it instructs the command to output only the names of the resources.

Run the oc delete command to delete all resource objects specified as parameters after --all within your selected project (namespace).

oc delete pod,deployment,deploymentconfig,pvc,route,service,build,buildconfig,\nstatefulset,replicaset,replicationcontroller,job,cronjob,imagestream,revision,\nconfiguration,notebook --all\n

Danger

The oc delete operation will delete all of the resources specified. This command can be very powerful and should be used with caution, as it will delete all resources in the specified project.

Always ensure that you are targeting the correct project (namespace) when using this command to avoid unintentional deletion of resources. If you're unsure which namespace you're currently in, run the oc project command to display the current project. To be safe, you can also specify the namespace in all oc commands by using the -n option, e.g., -n <your_openshift_project_to_decommission>.

Make sure to backup any important data or configurations before executing this command to prevent accidental data loss.

Please check all the resources currently being used by your project by running oc get all, as shown below:

oc get all\nNAME                        TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                               AGE\nservice/modelmesh-serving   ClusterIP   None         <none>        8033/TCP,8008/TCP,8443/TCP,2112/TCP   7m4s\n

Important Note

The last remaining service, i.e., service/modelmesh-serving, shown when running the oc get all command, is a REQUIRED resource, and so you don't need to clean it up.

"},{"location":"openshift/decommission/decommission-openshift-resources/#use-coldfront-to-reduce-the-storage-quota-to-zero","title":"Use ColdFront to reduce the Storage Quota to Zero","text":"

Each allocation, whether requested or approved, will be billed based on the pay-as-you-go model. The exception is for Storage quotas, where the cost is determined by your requested and approved allocation values to reserve storage from the total NESE storage pool. For NERC-OCP (OpenShift) Resource Allocations, storage quotas are specified by the \"OpenShift Request on Storage Quota (GiB)\" and \"OpenShift Limit on Ephemeral Storage Quota (GiB)\" allocation attributes.

Even if you have deleted all Persistent Volume Claims (PVC) in your OpenShift project, it is essential to adjust the approved values for your NERC-OCP (OpenShift) resource allocations to zero (0); otherwise, you will still be incurring a charge for the approved storage as explained in Billing FAQs.

To achieve this, you must submit a final change request to reduce the Storage Quotas for \"OpenShift Request on Storage Quota (GiB)\" and \"OpenShift Limit on Ephemeral Storage Quota (GiB)\" to zero (0) for your NERC-OCP (OpenShift) resource type. You can review and manage these resource allocations by visiting the resource allocations. Here, you can filter the allocation of your interest and then proceed to request a change request.

Very Important Note

Although other allocated resources i.e. CPU, RAM, GPU, etc. operate on a pay-as-you-go model, wherein charges are incurred solely based on usage, Active (Needs Renewal) allocations after \"End Date\" will remain accessible to the users assigned under the allocation. It is advisable to set all other allocation quota attributes to zero (0) during the change request. This measure ensures that existing users will not accidentally use the resources from the project.

Alternatively, PIs can control access to the allocation by removing users assigned to their NERC-OCP (OpenShift) allocation. This ensures that even if the allocation ends, users will not have access to the unused resources.

Please make sure your change request looks like this:

Wait until the requested resource allocation gets approved by the NERC's admin.

After approval, kindly review and verify that the quotas are accurately reflected in your resource allocation and OpenShift project. Please ensure that the approved quota values are accurately displayed as explained here.

"},{"location":"openshift/decommission/decommission-openshift-resources/#review-your-project-usage","title":"Review your Project Usage","text":"

Run the oc get quota command to list the resource quotas defined within your selected project (namespace). Please note the name of the resource quota in the output of this command, i.e., <your_openshift_project_resource_quota_name>.

oc get quota\n\nNAME                              AGE   REQUEST                                                                               LIMIT\n<your_openshift_project_resource_quota_name>   105s   persistentvolumeclaims: 0/0, requests.nvidia.com/gpu: 0/0, requests.storage: 0/0   limits.cpu: 0/0, limits.ephemeral-storage: 0/0, limits.memory: 0/0\n

Very Important: Ensure No Resources that will be Billed are Used

Most importantly, ensure that there is no active usage for any of your currently allocated project resources.

To review the resource quota usage for your project, you can run oc describe quota <your_openshift_project_resource_quota_name>.

Please ensure the output appears as follows, with all Used and Hard resources having a value of zero (0) as shown below:

oc describe quota <your_openshift_project_resource_quota_name>\n\nName:                     <your_openshift_project_resource_quota_name>\nNamespace:                <your_openshift_project_to_decommission>\nResource                  Used  Hard\n--------                  ----  ----\nlimits.cpu                0     0\nlimits.ephemeral-storage  0     0\nlimits.memory             0     0\npersistentvolumeclaims    0     0\nrequests.nvidia.com/gpu   0     0\nrequests.storage          0     0\n

Important Information

Make sure to replace <your_openshift_project_resource_quota_name> with the actual name you find in the output, which is typically in this format: <your_openshift_project_to_decommission>-project.

"},{"location":"openshift/decommission/decommission-openshift-resources/#review-your-projects-resource-quota-from-the-openshift-web-console","title":"Review your Project's Resource Quota from the OpenShift Web Console","text":"

After removing all OpenShift resources and updating all resource quotas to set them to zero (0), you can review and verify that these changes are reflected in your OpenShift Web Console as well.

When you are logged-in to the NERC's OpenShift Web Console, you will be redirected to the Developer perspective which is shown selected on the perspective switcher located at the Left side. You need to switch to the Administrator perspective to view your Project's Resource Quota as shown below:

On the left sidebar, navigate to Administration -> ResourceQuotas.

Click on your appropriate project name, i.e., <your_openshift_project_to_decommission>, to view the Resource Quota details.

Very Important Note

It should also indicate that all resources have NO usage, i.e., zero (0), and also NO maximum set, i.e., zero (0), as shown below:

"},{"location":"openshift/decommission/decommission-openshift-resources/#finally-archive-your-coldfront-project","title":"Finally, Archive your ColdFront Project","text":"

As a PI, you will now be able to Archive your ColdFront Project via accessing NERC's ColdFront interface. Please refer to these instructions on how to archive your projects that need to be decommissioned.

"},{"location":"openshift/get-started/openshift-overview/","title":"OpenShift Overview","text":"

OpenShift is a multifaceted, container orchestration platform from Red Hat. OpenShift Container Platform is a cloud-based Kubernetes container platform. NERC offers a cloud development Platform-as-a-Service (PaaS) solution based on Red Hat's OpenShift Container Platform that provides isolated, multi-tenant containers for application development and deployment. This is optimized for continuous containerized application development and multi-tenant deployment which allows you and your team to focus on solving your research problems and not infrastructure management.

"},{"location":"openshift/get-started/openshift-overview/#basic-components-and-glossary-of-common-terms","title":"Basic Components and Glossary of common terms","text":"

OpenShift is a container orchestration platform that provides a number of components and tools to help you build, deploy, and manage applications. Here are some of the basic components of OpenShift:

  • Project: A project is a logical grouping of resources in the NERC's OpenShift platform that provides isolation from other resources.

  • Nodes: Nodes are the physical or virtual machines that run the applications and services in your OpenShift cluster.

  • Image: An image is a non-changing definition of file structures and programs for running an application.

  • Container: A container is an instance of an image with the addition of other operating system components such as networking and running programs. Containers are used to run applications and services in OpenShift.

  • Pods: Pods are the smallest deployable units defined, deployed, and managed in OpenShift, that group related one or more containers that need to share resources.

  • Services: Services are logical representations of a set of pods that provide a network endpoint for access to the application or service. Services can be used to load balance traffic across multiple pods, and they can be accessed using a stable DNS name. Services are assigned an IP address and port and proxy connections to backend pods. This allows the pods to change while the connection details of the service remain consistent.

  • Volume: A volume is a persistent file space available to pods and containers for storing data. Containers are immutable and therefore upon a restart any contents are cleared and reset to the original state of the image used to create the container. Volumes provide storage space for files that need to persist through container restarts.

  • Routes: Routes expose services to external clients, allowing connections from outside the platform. A route is assigned a DNS name when it is set up to make it easily accessible. Routes can be configured with custom hostnames and TLS certificates.

  • Replication Controllers: A replication controller (rc) is a built-in mechanism that ensures a defined number of pod replicas are running at all times. If a pod unexpectedly quits or is deleted, a new copy of the pod is created and started. Additionally, if more pods are running than the defined number, the replication controller will delete the extra pods to get back down to the defined number.

  • Namespace: A Namespace is a way to logically isolate resources within the Cluster. In our case, every project gets a unique namespace.

  • Role-based access control (RBAC): A key security control to ensure that cluster users and workloads have access only to the resources required to execute their roles.

  • Deployment Configurations: A deployment configuration (dc) is an extension of a replication controller that is used to push out a new version of application code. Deployment configurations are used to define the process of deploying applications and services to OpenShift. Deployment configurations can be used to specify the number of replicas, the resources required by the application, and the deployment strategy to use.

  • Application URL Components: When an application developer adds an application to a project, a unique DNS name is created for the application via a Route. All application DNS names will have a hyphen separator between your application name and your unique project namespace. If the application is a web application, this DNS name is also used for the URL to access the application. All names are in the form of <appname>-<mynamespace>.apps.shift.nerc.mghpcc.org. For example: mytestapp-mynamespace.apps.shift.nerc.mghpcc.org.

"},{"location":"openshift/logging-in/access-the-openshift-web-console/","title":"Access the NERC's OpenShift Web Console","text":"

The NERC's OpenShift Container Platform web console is a user interface that can be accessed via the web.

You can find it at https://console.apps.shift.nerc.mghpcc.org.

The NERC Authentication supports CILogon with Keycloak for gateway authentication and authorization, which provides federated login via your institutional account and is the recommended authentication method.

Make sure you are selecting \"mss-keycloak\" as shown here:

Next, you will be redirected to CILogon welcome page as shown below:

MGHPCC Shared Services (MSS) Keycloak will request approval of access to the following information from the user:

  • Your CILogon user identifier

  • Your name

  • Your email address

  • Your username and affiliation from your identity provider

which are required in order to allow access to your account on NERC's OpenShift web console.

From the \"Selected Identity Provider\" dropdown option, please select your institution's name. If you would like to remember your selected institution name for future logins please check the \"Remember this selection\" checkbox this will bypass the CILogon welcome page on subsequent visits and proceed directly to the selected insitution's identity provider(IdP). Click \"Log On\". This will redirect to your respective institutional login page where you need to enter your institutional credentials.

Important Note

The NERC does not see or have access to your institutional account credentials; it points to your selected institution's identity provider and redirects back once authenticated.

Once you successfully authenticate you should see a graphical user interface to visualize your project data and perform administrative, management, and troubleshooting tasks.

I can't find my project

If you are a member of several projects, i.e., ColdFront NERC-OCP (OpenShift) allocations, you may need to switch projects before you can see and use the OpenShift resources you or your team has created. Clicking on the project dropdown near the top left side will pop up the list of projects you are in. You can search for and select the new project by hovering over and clicking on the project name in that list, as shown below:

Important Note

The default view for the OpenShift Container Platform web console is the Developer perspective.

"},{"location":"openshift/logging-in/setup-the-openshift-cli/","title":"How to Setup the OpenShift CLI Tools","text":"

The most commonly used command-line client tool for the NERC's OpenShift is OpenShift CLI (oc). It is available for Linux, Windows, or macOS and allows you to create applications and manage OpenShift Container Platform projects from a terminal.

"},{"location":"openshift/logging-in/setup-the-openshift-cli/#installing-the-openshift-cli","title":"Installing the OpenShift CLI","text":"

Installation options for the CLI vary depending on your Operating System (OS). You can install the OpenShift CLI (oc) either by downloading the binary or by using an RPM package manager.

Unlike the web console, it allows the user to work directly with the project source code using command scripts once they are authenticated using a token.

You can download the latest oc CLI client tool binary from the web console as shown below:

Then add it to your PATH environment variable based on your OS choice by following this documentation.

"},{"location":"openshift/logging-in/setup-the-openshift-cli/#configuring-the-openshift-cli","title":"Configuring the OpenShift CLI","text":"

You can configure the oc command tool to enable tab completion to automatically complete oc commands or suggest options when you press Tab for the Bash or Zsh shells by following these steps.
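
As a minimal sketch, assuming a Bash shell and that the oc binary is already on your PATH (the completion file name below is an arbitrary choice), enabling tab completion typically looks like this:

oc completion bash > ~/.oc_completion.bash   # generate the completion script\necho 'source ~/.oc_completion.bash' >> ~/.bashrc   # load it in every new shell\nsource ~/.bashrc   # apply it to the current shell\n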

"},{"location":"openshift/logging-in/setup-the-openshift-cli/#first-time-usage","title":"First Time Usage","text":"

Before you can use the oc command-line tool, you will need to authenticate to the NERC's OpenShift platform by running the built-in login command obtained from the NERC's OpenShift Web Console. This authenticates you and enables you to work with your NERC OpenShift Container Platform projects. It will create a session that lasts approximately 24 hours.

To get the oc login command with your own unique token, please login to the NERC's OpenShift Web Console and then under your user profile link located at the top right corner, click on Copy login command as shown below:

It will once again ask you to provide your Keycloak login; once successful, it will redirect you to a static page with a link to Display Token, as shown below:

Clicking on that \"Display Token\" link it will show a static page with Login command with token as shown below:

Copy and run the generated command in your terminal to authenticate yourself and access the project from your terminal, i.e., oc login --token=<Your-Token> --server=https://<NERC-OpenShift-Server>

If you try to run an oc command and get a permission denied message, your login session has likely expired, and you will need to re-generate the oc login command from your NERC's OpenShift Web Console and then run the new oc login command with the new token in your terminal.
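
For example, after pasting the copied login command (shown here with the same placeholder values), you can confirm that the session works and see which project is active:

oc login --token=<Your-Token> --server=https://<NERC-OpenShift-Server>\noc whoami     # prints the username you are logged in as\noc project    # shows the project (namespace) currently in use\n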

"},{"location":"openshift/logging-in/setup-the-openshift-cli/#other-useful-oc-commands","title":"Other Useful oc Commands","text":"

This reference document provides descriptions and example commands for OpenShift CLI (oc) developer commands.

Important Note

Run oc help to list all commands or run oc <command> --help to get additional details for a specific command.
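
As a quick reference, a few commonly used developer commands are sketched below; substitute your own pod and resource names:

oc get pods                  # list pods in the current project\noc logs <pod-name>           # view logs from a pod\noc describe pod <pod-name>   # show detailed information about a pod\noc get routes                # list routes exposing your services\n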

"},{"location":"openshift/logging-in/the-openshift-cli/","title":"OpenShift command-line interface (CLI) Tools Overview","text":"

With the OpenShift CLI, the oc command, you can create applications and manage OpenShift Container Platform projects from a terminal.

The web console provides a comprehensive set of tools for managing your projects and applications. There are, however, some tasks that can only be performed using a command-line tool called oc.

The OpenShift CLI is ideal in the following situations:

  • Working directly with project source code

  • Scripting OpenShift Container Platform operations

  • Managing projects when you are restricted by bandwidth resources and the web console is unavailable

It is recommended that developers be comfortable with simple command-line tasks and with the NERC's OpenShift command-line tool.

"},{"location":"openshift/logging-in/web-console-overview/","title":"Web Console Overview","text":"

The NERC's OpenShift Container Platform (OCP) has a web-based console that can be used to perform common management tasks such as building and deploying applications.

You can find it at https://console.apps.shift.nerc.mghpcc.org.

The web console provides tools to access and manage your application code and data.

Below is a sample screenshot of the web interface with labels describing different sections of the NERC's OpenShift Web Console:

  1. Perspective Switcher - Drop-down to select a different perspective. The available perspectives are a Developer view and an Administrator view.

  2. Project List - Drop-down to select a different project. Based on the user's active and approved resource allocations, this project list is updated.

  3. Navigation Menu - Menu options to access different tools and settings for a project. The list will change depending on which Perspective view you are in.

  4. User Preferences - Shows the option to get and copy the OpenShift Command Line oc login command and set your individual console preferences, including default views, language, import settings, and more.

  5. View Switcher - This three-dot menu is used to switch between the List view and Graph view of all your applications.

  6. Main Panel - Displays basic application information. Clicking on the application names in the main panel expands the Details Panel (7).

  7. Details Panel - Displays additional information about the application selected from the Main Panel. This includes detailed information about the running application, application builds, routes, and more. Tabs at the top of this panel change the view to show additional information such as Details and Resources.

"},{"location":"openshift/logging-in/web-console-overview/#perspective-switcher","title":"Perspective Switcher","text":"

When you are logged in, you will be redirected to the Developer perspective, which is shown selected on the perspective switcher located at the left side. You can switch between the Administrator perspective and the Developer perspective as per your roles and permissions in a project.

"},{"location":"openshift/logging-in/web-console-overview/#about-the-administrator-perspective-in-the-web-console","title":"About the Administrator perspective in the web console","text":"

The Administrator perspective enables you to view the cluster inventory, capacity, general and specific utilization information, and the stream of important events, all of which help you to simplify planning and troubleshooting tasks. Both project administrators and cluster administrators can view the Administrator perspective.

Important Note

The default web console perspective that is shown depends on the role of the user. The Administrator perspective is displayed by default if the user is recognized as an administrator.

"},{"location":"openshift/logging-in/web-console-overview/#about-the-developer-perspective-in-the-web-console","title":"About the Developer perspective in the web console","text":"

The Developer perspective offers several built-in ways to deploy applications, services, and databases.

Important Note

The default view for the OpenShift Container Platform web console is the Developer perspective.

The web console provides a comprehensive set of tools for managing your projects and applications.

"},{"location":"openshift/logging-in/web-console-overview/#project-list","title":"Project List","text":"

You can select or switch your projects from the available project drop-down list located on top navigation as shown below:

Important Note

You can identify the currently selected project by its tick mark, and you can also click on the star icon to keep the project in your Favorites list.

"},{"location":"openshift/logging-in/web-console-overview/#navigation-menu","title":"Navigation Menu","text":""},{"location":"openshift/logging-in/web-console-overview/#topology","title":"Topology","text":"

The Topology view in the Developer perspective of the web console provides a visual representation of all the applications within a project, their build status, and the components and services associated with them. If you have no workloads or applications in the project, the Topology view displays the available options to create applications. If you have existing workloads, the Topology view graphically displays your workload nodes. To read more about how to view the topology of your application, please read this official documentation from Red Hat.

"},{"location":"openshift/logging-in/web-console-overview/#observe","title":"Observe","text":"

This provides you with a Dashboard to view resource usage as well as other metrics and events that occurred in your project. Here you can identify, monitor, and inspect the usage of Memory, CPU, Network, and Storage in your project.

"},{"location":"openshift/logging-in/web-console-overview/#search","title":"Search","text":"

This allows you to search any resources based on search criteria like Label or Name.

"},{"location":"openshift/logging-in/web-console-overview/#builds","title":"Builds","text":"

This menu provides tools for building and deploying applications. You can use it to create and manage build configurations using YAML syntax, as well as view the status and logs of your builds.

"},{"location":"openshift/logging-in/web-console-overview/#helm","title":"Helm","text":"

You can enable Helm Charts here. Helm is the package manager that helps you easily manage definitions, installations, and upgrades of your complex applications. It also shows a catalog of all available Helm charts that you can install and use.

"},{"location":"openshift/logging-in/web-console-overview/#project","title":"Project","text":"

This allows you to view the overview of the currently selected project from the drop-down list and also details about it including resource utilization and resource quotas.

"},{"location":"openshift/logging-in/web-console-overview/#configmaps","title":"ConfigMaps","text":"

This menu allows you to view or create a new ConfigMap by manually entering YAML or JSON definitions, or by dragging and dropping a file into the editor.

"},{"location":"openshift/logging-in/web-console-overview/#secrets","title":"Secrets","text":"

This allows you to view or create Secrets, which let you inject sensitive data into your application as files or environment variables.

"},{"location":"openshift/storage/storage-overview/","title":"Storage Overview","text":"

The NERC OCP supports multiple types of storage.

"},{"location":"openshift/storage/storage-overview/#glossary-of-common-terms-for-ocp-storage","title":"Glossary of common terms for OCP storage","text":"

This glossary defines common terms that are used in the storage content.

"},{"location":"openshift/storage/storage-overview/#storage","title":"Storage","text":"

OCP supports many types of storage, both for on-premise and cloud providers. You can manage container storage for persistent and non-persistent data in an OCP cluster.

"},{"location":"openshift/storage/storage-overview/#storage-class","title":"Storage class","text":"

A storage class provides a way for administrators to describe the classes of storage they offer. Different classes might map to quality-of-service levels, backup policies, or arbitrary policies determined by the cluster administrators.
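
To see which storage classes are available on the cluster, you can list them from the CLI, for example:

oc get storageclass\n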

"},{"location":"openshift/storage/storage-overview/#storage-types","title":"Storage types","text":"

OCP storage is broadly classified into two categories, namely ephemeral storage and persistent storage.

"},{"location":"openshift/storage/storage-overview/#ephemeral-storage","title":"Ephemeral storage","text":"

Pods and containers are ephemeral or transient in nature and designed for stateless applications. Ephemeral storage allows administrators and developers to better manage the local storage for some of their operations. For more information about ephemeral storage overview, types, and management, see Understanding ephemeral storage.

Pods and containers can require temporary or transient local storage for their operation. The lifetime of this ephemeral storage does not extend beyond the life of the individual pod, and this ephemeral storage cannot be shared across pods.

"},{"location":"openshift/storage/storage-overview/#persistent-storage","title":"Persistent storage","text":"

Stateful applications deployed in containers require persistent storage. OCP uses a pre-provisioned storage framework called persistent volumes (PV) to allow cluster administrators to provision persistent storage. The data inside these volumes can exist beyond the lifecycle of an individual pod. Developers can use persistent volume claims (PVCs) to request storage requirements. For more information about persistent storage overview, configuration, and lifecycle, see Understanding persistent storage.

Pods and containers can require permanent storage for their operation. OpenShift Container Platform uses the Kubernetes persistent volume (PV) framework to allow cluster administrators to provision persistent storage for a cluster. Developers can use PVC to request PV resources without having specific knowledge of the underlying storage infrastructure.

"},{"location":"openshift/storage/storage-overview/#persistent-volumes-pv","title":"Persistent volumes (PV)","text":"

OCP uses the Kubernetes persistent volume (PV) framework to allow cluster administrators to provision persistent storage for a cluster. Developers can use PVC to request PV resources without having specific knowledge of the underlying storage infrastructure.

"},{"location":"openshift/storage/storage-overview/#persistent-volume-claims-pvcs","title":"Persistent volume claims (PVCs)","text":"

You can use a PVC to mount a PersistentVolume into a Pod. You can access the storage without knowing the details of the cloud environment.

Important Note

A PVC is in active use by a pod when a Pod object exists that uses the PVC.
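
As a quick check from the CLI, you can list the PVCs in your project and inspect whether a given claim is bound and which pods are using it, for example:

oc get pvc\noc describe pvc <your-pvc-name>\n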

"},{"location":"openshift/storage/storage-overview/#access-modes","title":"Access modes","text":"

Volume access modes describe volume capabilities. You can use access modes to match a persistent volume claim (PVC) with a persistent volume (PV). The following are examples of access modes:

| Access Mode | Description |
| ----------- | ----------- |
| ReadWriteOnce (RWO) | Allows read-write access to the volume by a single node at a time. |
| ReadOnlyMany (ROX) | Allows the volume to be mounted read-only by many nodes simultaneously. |
| ReadWriteMany (RWX) | Allows multiple nodes to read from and write to the volume simultaneously. |
| ReadWriteOncePod (RWOP) | Allows read-write access to the volume by a single pod at a time. |
"},{"location":"openshift-ai/","title":"Red Hat OpenShift AI (RHOAI) Tutorial Index","text":"

If you're just starting out, we recommend starting from Red Hat OpenShift AI (RHOAI) Overview and going through the tutorial in order.

If you just need to review a specific step, you can find the page you need in the list below.

"},{"location":"openshift-ai/#nerc-openshift-ai-getting-started","title":"NERC OpenShift AI Getting Started","text":"
  • NERC Red Hat OpenShift AI (RHOAI) Overview <<-- Start Here
"},{"location":"openshift-ai/#nerc-openshift-ai-dashboard","title":"NERC OpenShift AI dashboard","text":"
  • Access the NERC's OpenShift AI dashboard

  • The NERC's OpenShift AI dashboard Overview

"},{"location":"openshift-ai/#using-data-science-project-in-the-nerc-rhoai","title":"Using Data Science Project in the NERC RHOAI","text":"
  • Using Your Data Science Project (DSP)

  • Explore the JupyterLab Environment

  • Model Serving in the NERC RHOAI

  • Test the Model in the NERC RHOAI

"},{"location":"openshift-ai/#other-example-projects","title":"Other Example Projects","text":"
  • How to access, download, and analyze data for S3 usage

  • Configure a Jupyter Notebook to use GPUs for AI/ML modeling

"},{"location":"openshift-ai/data-science-project/explore-the-jupyterlab-environment/","title":"Explore the JupyterLab Environment","text":"

When your workbench is ready, the status will change to Running and you can select \"Open\" to go to your environment:

How can I start or stop a Workbench?

You can use this \"toggle switch\" under the \"Status\" section to easily start/stop this environment later on.

Make sure you are selecting \"mss-keycloak\" once shown:

Authorize the requested permissions if needed:

This will initiate your JupyterLab environment based on the Jupyter Image you have selected. JupyterLab offers a shared interactive integrated development environment.

Once you successfully authenticate you should see the NERC RHOAI JupyterLab Web Interface as shown below:

It's pretty empty right now, though. The first thing we will do is add content into this environment by using Git.

"},{"location":"openshift-ai/data-science-project/explore-the-jupyterlab-environment/#clone-a-git-repository","title":"Clone a Git repository","text":"

You can clone a Git repository in JupyterLab through the left-hand toolbar or the Git menu option in the main menu as shown below:

Let's clone a repository using the left-hand toolbar. Click on the Git icon, shown in below:

Then click on Clone a Repository as shown below:

Enter the git repository URL, which points to the end-to-end ML workflows demo project i.e. https://github.com/nerc-project/nerc_rhoai_mlops.

Then click Clone button as shown below:

What is MLOps?

Machine learning operations (MLOps) are a set of practices that automate and simplify machine learning (ML) workflows and deployments.

Cloning takes a few seconds, after which you can double-click and navigate to the newly-created folder that contains your cloned Git repository.
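
Alternatively, if you prefer the command line, you can open a Terminal from the JupyterLab Launcher and clone the same repository with git:

git clone https://github.com/nerc-project/nerc_rhoai_mlops.git\n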

"},{"location":"openshift-ai/data-science-project/explore-the-jupyterlab-environment/#exploring-the-example-nerc-mlops-project","title":"Exploring the Example NERC MLOps Project","text":"

You will be able to find the newly-created folder named nerc_rhoai_mlops based on the Git repository name, as shown below:

"},{"location":"openshift-ai/data-science-project/explore-the-jupyterlab-environment/#working-with-notebooks","title":"Working with notebooks","text":""},{"location":"openshift-ai/data-science-project/explore-the-jupyterlab-environment/#whats-a-notebook","title":"What's a notebook?","text":"

A notebook is an environment where you have cells that can display formatted text, or code.

This is an empty cell:

And a cell where we have entered some Python code:

  • Code cells contain Python code that can be run interactively. This means that you can modify the code and then run it, but only for this cell, not for the whole content of the notebook! The code will not run on your computer or in the browser, but directly in the environment you are connected to, NERC RHOAI.

  • To run a code cell, you simply select it (click the cell, or the area on the left side of it), and select the Run/Play button from the toolbar (you can also press CTRL+Enter to run a cell, or Shift+Enter to run the cell and automatically select the following one).

The Run button on the toolbar:

As you will see, you then get the result of the code that was run in that cell (if the code produces some output), as well as information on when this particular cell has been run.

When you save a notebook, the code as well as all the results are saved! So you can always reopen it to look at the results without having to run the whole program again, while still having access to the code that produced this content.

More about Notebook

Notebooks are so named because they are just like a physical notebook. It is exactly as if you were taking notes about your experiments (which you will do), along with the code itself, including any parameters you set. You see the output of the experiment inline (this is the result from a cell once it is run), along with all the notes you want to take (to do that, you can switch the cell type in the menu from Code to Markdown).

"},{"location":"openshift-ai/data-science-project/explore-the-jupyterlab-environment/#sample-jupyter-notebook-files","title":"Sample Jupyter Notebook files","text":"

In your Jupyter environment, you can navigate and select any Jupyter notebook files by double-clicking them in the file explorer on the left side. Double-click the notebook file to launch it. This action will open another tab in the content section of the environment, on the right.

Here, you can find three primary starter notebooks for setting up the intelligent application: 01_sandbox.ipynb, 02_model_training_basics.ipynb, and 03_remote_inference.ipynb within the root folder path of nerc_rhoai_mlops.

You can click and run 01_sandbox.ipynb to verify that the JupyterLab environment setup can run Python code properly.

Also, you can find the \"samples\" folder within the root folder path of nerc_rhoai_mlops. For learning purposes, double-click on the \"samples\" folder under the newly-created folder named nerc_rhoai_mlops. Within the \"samples\" folder, you'll find some starter Jupyter notebook files: Intro.ipynb, Lorenz.ipynb, and gpu.ipynb. These files can be used to test basic JupyterLab functionalities. You can explore them at your own pace by running each of them individually. Please feel free to experiment, run the different cells, add some more code. You can do what you want - it is your environment, and there is no risk of breaking anything or impacting other users. This environment isolation is also a great advantage brought by NERC RHOAI.

How to get access to the NERC RHOAI Dashboard from JupyterLab Environment?

If you had closed the NERC RHOAI dashboard, you can access it from your currently opened JupyterLab IDE by clicking on File -> Hub Control Panel as shown below:

"},{"location":"openshift-ai/data-science-project/explore-the-jupyterlab-environment/#testing-for-gpu-code","title":"Testing for GPU Code","text":"

As we have set up the workbench specifying the desired Number of GPUs: \"1\", we will be able to test GPU-based code by running the gpu.ipynb notebook file, as shown below:

"},{"location":"openshift-ai/data-science-project/explore-the-jupyterlab-environment/#training-a-model","title":"Training a model","text":"

Within the root folder path of nerc_rhoai_mlops, find a sample Jupyter notebook file 02_model_training_basics.ipynb that demonstrates how to train a model within the NERC RHOAI. To run it, double-click it and use the \"Run\" button to run all notebook cells at once. This notebook trains your model for \"Basic classification of clothing images\" by importing the publicly available Fashion MNIST dataset and using TensorFlow. This process will take some time to complete. At the end, it will generate and save the model my-model.keras within the root folder path of nerc_rhoai_mlops.
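
The notebook contains the full workflow; as a rough, minimal sketch of the idea only (assuming a recent TensorFlow is available, as in the TensorFlow workbench image), training and saving a simple classifier might look like this:

import tensorflow as tf\n\n# Load the publicly available Fashion MNIST dataset\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\nx_train, x_test = x_train / 255.0, x_test / 255.0\n\n# A small fully connected classifier for 28x28 clothing images\nmodel = tf.keras.Sequential([\n    tf.keras.layers.Flatten(input_shape=(28, 28)),\n    tf.keras.layers.Dense(128, activation=\"relu\"),\n    tf.keras.layers.Dense(10, activation=\"softmax\"),\n])\nmodel.compile(optimizer=\"adam\",\n              loss=\"sparse_categorical_crossentropy\",\n              metrics=[\"accuracy\"])\nmodel.fit(x_train, y_train, epochs=5)\n\n# Save the trained model in the Keras format used by the notebook\nmodel.save(\"my-model.keras\")\n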

The Machine Learning Model File Hosted on NERC OpenStack Object Bucket.

The model we are going to use is an object detection model that is able to isolate and recognize T-shirts, bottles, and hats in pictures. Although the process is broadly the same as what we have seen in the previous section, this model has already been trained, as training it takes a few hours even with the help of a GPU. If you want to know more about this training process, you can have a look here.

The resulting model has been saved in the ONNX format, an open standard for machine learning interoperability, which is one we can use with OpenVINO and RHOAI model serving. The model has been stored and is available for download in NERC OpenStack Object Storage container as described here.

"},{"location":"openshift-ai/data-science-project/model-serving-in-the-rhoai/","title":"Model Serving in the NERC RHOAI","text":"

Prerequisites:

To run a model server and deploy a model on it, you need to have:

  • Select the correct data science project and create a workbench; see Populate the data science project for more information.
"},{"location":"openshift-ai/data-science-project/model-serving-in-the-rhoai/#create-a-data-connection","title":"Create a data connection","text":"

Once we have our workbench and cluster storage set up, we can add data connections. Click the \"Add data connection\" button to open the data connection configuration window as shown below:

Data connections are configurations for remote data locations. Within this window, enter the information about the S3-compatible object bucket where the model is stored. Enter the following information:

  • Name: The name you want to give to the data connection.

  • Access Key: The access key to the bucket.

  • Secret Key: The secret for the access key.

  • Endpoint: The endpoint to connect to the storage.

  • Region: The region to connect to the storage.

  • Bucket: The name of the bucket.

NOTE: However, you are not required to use the S3 service from Amazon Web Services (AWS). Any S3-compatible storage, e.g., NERC OpenStack Container (Ceph), Minio, AWS S3, etc., is supported.

For our example project, let's name it \"ocp-nerc-container-connect\", we'll select the \"us-east-1\" as Region, choose \"ocp-container\" as Bucket.

The API Access EC2 credentials can be downloaded and accessed from the NERC OpenStack Project as described here. This credential file contains information regarding Access Key, Secret Key, and Endpoint.

Very Important Note: If you are using an AWS S3 bucket, the Endpoint needs to be set as https://s3.amazonaws.com/. However, for the NERC Object Storage container, which is based on the Ceph backend, the Endpoint needs to be set as https://stack.nerc.mghpcc.org:13808, and the Region should be set as us-east-1.
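
As an optional sanity check before filling in the data connection form, you could verify the endpoint and EC2 credentials from a notebook cell using boto3 (a sketch only, assuming boto3 is installed in your workbench image and using the example bucket name from this project):

import boto3\n\n# NERC Object Storage (Ceph) endpoint and region, as noted above\ns3 = boto3.client(\n    \"s3\",\n    endpoint_url=\"https://stack.nerc.mghpcc.org:13808\",\n    region_name=\"us-east-1\",\n    aws_access_key_id=\"<Access Key>\",\n    aws_secret_access_key=\"<Secret Key>\",\n)\n\n# List the objects in the bucket that will hold the model file\nfor obj in s3.list_objects_v2(Bucket=\"ocp-container\").get(\"Contents\", []):\n    print(obj[\"Key\"])\n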

How to store & connect to the model file in the object storage bucket?

The model file(s) should have been saved into an S3-compatible object storage bucket (NERC OpenStack Container [Ceph], Minio, or AWS S3) for which you must have the connection information, such as location and credentials. You can create a bucket on your active project at the NERC OpenStack Project by following the instructions in this guide.

The API Access EC2 credentials can be downloaded and accessed from the NERC OpenStack Project as described here.

For our example project, we are creating a bucket named \"ocp-container\" in one of our NERC OpenStack project's object storage. Inside this bucket, we have added a folder or directory called \"coolstore-model\", where we will store the model file in ONNX format, as shown here:

ONNX: An open standard for machine learning interoperability.

After completing the required fields, click Add data connection. You should now see the data connection displayed in the main project window as shown below:

"},{"location":"openshift-ai/data-science-project/model-serving-in-the-rhoai/#create-a-model-server","title":"Create a model server","text":"

After creating the data connection, you can add your model server. Select Add server as shown below:

In the pop-up window that appears, as shown below, you can specify the following details:

  • Model server name

  • Serving runtime: either \"OpenVINO Model Server\" or \"OpenVINO Model Server (Supports GPUs)\"

  • Number of model server replicas: This is the number of instances of the model server engine that you want to deploy. You can scale it up as needed, depending on the number of requests you will receive.

  • Model server size: This is the amount of resources, CPU, and RAM that will be allocated to your server. Select the appropriate configuration for size and the complexity of your model.

  • Model route: Check this box if you want the serving endpoint (the model serving API) to be accessible outside of the OpenShift cluster through an external route.

  • Token authorization: Check this box if you want to secure or restrict access to the model by forcing requests to provide an authorization token.

After adding and selecting options within the Add model server pop-up window, click Add to create the model server.

For our example project, let's name the Model server \"coolstore-modelserver\". We'll select the OpenVINO Model Server as the Serving runtime. Leave replicas at \"1\" and size at \"Small\". At this point, don't check Make model available via an external route, as shown below:

NERC RHOAI supported Model Server Runtimes

NERC RHOAI integrates Intel's OpenVINO Model Server runtime, a high-performance system for serving models, optimized for deployment on Intel architectures. NERC RHOAI also offers an OpenVINO Model Server serving runtime that supports GPUs.

Once you've configured your model server, you can deploy your model by clicking on \"Deploy model\" located on the right side of the running model server. Alternatively, you can also do this from the main RHOAI dashboard's \"Model Serving\" menu item as shown below:

If you wish to view details for the model server, click on the link corresponding to the Model Server's Name. You can also modify a model server configuration by clicking on the three dots on the right side and selecting Edit model server. This will bring back the same configuration page we used earlier. This menu also has an option to delete the model server.

"},{"location":"openshift-ai/data-science-project/model-serving-in-the-rhoai/#deploy-the-model","title":"Deploy the model","text":"

To add a model to be served, click the Deploy model button. Doing so will initiate the Deploy model pop-up window as shown below:

Enter the following information for your new model:

  • Model Name: The name you want to give to your model (e.g., \"coolstore\").

  • Model framework (name-version): The framework used to save this model. At this time, OpenVINO IR, ONNX, and TensorFlow are supported.

  • Model location: Select the data connection that you created to store the model. Alternatively, you can create another data connection directly from this menu.

  • Folder path: If your model is not located at the root of the bucket of your data connection, you must enter the path to the folder it is in.

For our example project, let's name the Model as \"coolstore\", select \"onnx-1\" for the framework, select the Data location you created before for the Model location, and enter \"coolstore-model\" as the folder path for the model (without leading /).

When you are ready to deploy your model, select the Deploy button.

When you return to the Deployed models page, you will see your newly deployed model. You should click on the 1 on the Deployed models tab to see details. When the model has finished deploying, the status icon will be a green checkmark indicating the model deployment is complete as shown below:

The model is now accessible through the API endpoint of the model server. The information about the endpoint is different, depending on how you configured the model server.

If you did not expose the model externally through a route, click on the Internal Service link in the Inference endpoint section. A popup will display the address for the gRPC and the REST URLs for the inference endpoints as shown below:

Notes:

  • The REST URL displayed is only the base address of the endpoint. You must append /v2/models/name-of-your-model/infer to it to have the full address. Example: http://modelmesh-serving.model-serving:8008/v2/models/coolstore/infer

  • The full documentation of the API (REST and gRPC) is available here.

  • The gRPC proto file for the Model Server is available here.

  • If you have exposed the model through an external route, the Inference endpoint displays the full URL that you can copy.

Important Note

Even when you expose the model through an external route, the internal ones are still available. They use this format:

  • REST: http://modelmesh-serving.name-of-your-project:8008/v2/models/name-of-your-model/infer

  • gRPC: grpc://modelmesh-serving.name-of-your-project:8033. Please make note of the grpc URL value; we will need it later.

Your model is now deployed and ready to use!
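
As a quick way to confirm that the endpoint responds, a REST call sketch is shown below; the namespace, input name, shape, and datatype are placeholders and must be adjusted to match your own project and model:

import requests\n\n# Internal REST inference endpoint (see the URL formats above)\nurl = \"http://modelmesh-serving.<your-namespace>:8008/v2/models/coolstore/infer\"\n\n# Placeholder payload following the v2 inference protocol; adjust name, shape,\n# datatype, and data to match your model's real input\npayload = {\n    \"inputs\": [\n        {\n            \"name\": \"<input-name>\",\n            \"shape\": [1, 3, 416, 416],\n            \"datatype\": \"FP32\",\n            \"data\": [0.0] * (3 * 416 * 416),\n        }\n    ]\n}\n\nresponse = requests.post(url, json=payload)\nprint(response.status_code)\nprint(response.json())\n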

"},{"location":"openshift-ai/data-science-project/testing-model-in-the-rhoai/","title":"Test the Model in the NERC RHOAI","text":"

Now that the model server is ready to receive requests, we can test it.

How to get access to the NERC RHOAI Dashboard from JupyterLab Environment?

If you had closed the NERC RHOAI dashboard, you can access it from your currently opened JupyterLab IDE by clicking on File -> Hub Control Panel as shown below:

  • In your project in JupyterLab, open the notebook 03_remote_inference.ipynb and follow the instructions to see how the model can be queried.

  • Update the grpc_url with the grpc URL value noted earlier from the deployed model on the NERC RHOAI Model server.

  • Once you've completed the notebook's instructions, the object detection model can isolate and recognize T-shirts, bottles, and hats in pictures, as shown below:

"},{"location":"openshift-ai/data-science-project/testing-model-in-the-rhoai/#building-and-deploying-an-intelligent-application","title":"Building and deploying an intelligent application","text":"

The application we are going to deploy is a simple example of how you can add an intelligent feature powered by AI/ML to an application. It is a webapp that you can use on your phone to discover coupons on various items you can see in a store, in an augmented reality way.

"},{"location":"openshift-ai/data-science-project/testing-model-in-the-rhoai/#architecture","title":"Architecture","text":"

The different components of this intelligent application are:

  • The Frontend: a React application, typically running on the browser of your phone.

  • The Backend: a NodeJS server, serving the application and relaying API calls.

  • The Pre-Post Processing Service: a Python FastAPI service, doing the image pre-processing, calling the model server API, and doing the post-processing before sending the results back.

  • The Model Server: the RHOAI component serving the model as an API to do the inference.

"},{"location":"openshift-ai/data-science-project/testing-model-in-the-rhoai/#application-workflow-steps","title":"Application Workflow Steps","text":"
  1. Pass the image to the pre-post processing service

  2. Pre-process the image and call the model server

  3. Send back the inference result

  4. Post-process the inference and send back the result

  5. Pass the result to the frontend for display

"},{"location":"openshift-ai/data-science-project/testing-model-in-the-rhoai/#deploy-the-application","title":"Deploy the application","text":"

The deployment of the application is really easy, as the necessary YAML files have already been created for you. They are included in the Git project we used for this example project. You can find them in the deployment folder inside your JupyterLab environment, or directly here.

To deploy the Pre-Post Processing Service and the Application:

  • From your NERC's OpenShift Web Console, navigate to your project corresponding to the NERC RHOAI Data Science Project and select the \"Import YAML\" button, represented by the \"+\" icon in the top navigation bar as shown below:

  • Verify that you selected the correct project.

  • Copy/Paste the content of the file pre_post_processor_deployment.yaml inside the opened YAML editor. If you have named your model coolstore as instructed, you're good to go. If not, modify the value on line # 35 with the name you set. You can then click the Create button as shown below:

  • Once Resource is successfully created, you will see the following screen:

  • Click on \"Import more YAML\" and Copy/Paste the content of the file intelligent_application_deployment.yaml inside the opened YAML editor. Nothing to change here, you can then click the Create button as shown below:

  • If both deployments are successful, you will be able to see both of them grouped under \"intelligent-application\" on the Topology View menu, as shown below:

"},{"location":"openshift-ai/data-science-project/testing-model-in-the-rhoai/#use-the-application","title":"Use the application","text":"

The application is relatively straightforward to use. Click on the URL for the Route ia-frontend that was created.

You first have to allow it to use your camera; this is the interface you get:

You have:

  • The current view of your camera.

  • A button to take a picture as shown here:

  • A button to switch from front to rear camera if you are using a phone:

  • A QR code that you can use to quickly open the application on a phone (much easier than typing the URL!):

When you take a picture, it will be sent to the inference service, and you will see which items have been detected, and if there is a promotion available as shown below:

"},{"location":"openshift-ai/data-science-project/testing-model-in-the-rhoai/#tweak-the-application","title":"Tweak the application","text":"

There are two parameters you can change on this application:

  • On the ia-frontend Deployment, you can modify the DISPLAY_BOX environment variable from true to false. It will hide the bounding box and the inference score, so that you get only the coupon flying over the item.

  • On the ia-inference Deployment, the one used for pre-post processing, you can modify the COUPON_VALUE environment variable. The format is simply an Array with the value of the coupon for the 3 classes: bottle, hat, shirt. As you see, these values could be adjusted in real time, and this could even be based on another ML model!
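
Both values can be changed from the OpenShift Web Console by editing each Deployment's environment variables, or from the CLI; for example, a sketch using oc set env (the coupon values below are arbitrary placeholders) might be:

oc set env deployment/ia-frontend DISPLAY_BOX=false\noc set env deployment/ia-inference COUPON_VALUE='[1.99,2.99,3.99]'\n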

"},{"location":"openshift-ai/data-science-project/using-projects-the-rhoai/","title":"Using Your Data Science Project (DSP)","text":"

You can access your current projects by navigating to the \"Data Science Projects\" menu item on the left-hand side, as highlighted in the figure below:

If you have any existing projects, they will be displayed here. These projects correspond to your NERC-OCP (OpenShift) resource allocations.

Why we need Data Science Project (DSP)?

To implement a data science workflow, you must use a data science project. Projects allow you and your team to organize and collaborate on resources within separated namespaces. From a project you can create multiple workbenches, each with their own Jupyter notebook environment, and each with their own data connections and cluster storage. In addition, the workbenches can share models and data with pipelines and model servers.

"},{"location":"openshift-ai/data-science-project/using-projects-the-rhoai/#selecting-your-data-science-project","title":"Selecting your data science project","text":"

Here, you can click on specific projects corresponding to the appropriate allocation where you want to work. This brings you to your selected data science project's details page, as shown below:

Within the data science project, you can add the following configuration options:

  • Workbenches: Development environments within your project where you can access notebooks and generate models.

  • Cluster storage: Storage for your project in your OpenShift cluster.

  • Data connections: A list of data sources that your project uses.

  • Pipelines: A list of created and configured pipeline servers.

  • Models and model servers: A list of models and model servers that your project uses.

As you can see in the project's details figure, our selected data science project currently has no workbenches, storage, data connections, pipelines, or model servers.

"},{"location":"openshift-ai/data-science-project/using-projects-the-rhoai/#populate-the-data-science-project-with-a-workbench","title":"Populate the data science project with a Workbench","text":"

Add a workbench by clicking the Create workbench button as shown below:

What are Workbenches?

Workbenches are development environments. They can be based on JupyterLab, but also on other types of IDEs, like VS Code or RStudio. You can create as many workbenches as you want, and they can run concurrently.

On the Create workbench page, complete the following information.

Note: Not all fields are required.

  • Name

  • Description

  • Notebook image (Image selection)

  • Deployment size (Container size and Number of GPUs)

  • Environment variables

  • Cluster storage name

  • Cluster storage description

  • Persistent storage size

  • Data connections

How to specify CPUs, Memory, and GPUs for your JupyterLab workbench?

You have the option to select different container sizes to define compute resources, including CPUs and memory. Each container size comes with pre-configured CPU and memory resources.

Optionally, you can specify the desired Number of GPUs depending on the nature of your data analysis and machine learning code requirements. However, this number should not exceed the GPU quota specified by the value of the \"OpenShift Request on GPU Quota\" attribute that has been approved for this \"NERC-OCP (OpenShift)\" resource allocation on NERC's ColdFront, as described here.

If you need to increase this quota value, you can request a change as explained here.

Once you have entered the information for your workbench, click Create.

For our example project, let's name it \"Tensorflow Workbench\". We'll select the TensorFlow image, choose a Deployment size of Small, Number of GPUs as 1 and allocate a Cluster storage space of 1GB.

More About Cluster Storage

Cluster storage consists of Persistent Volume Claims (PVCs), which are persistent storage spaces available for storing your notebooks and data. You can create PVCs directly from here and mount them in your workbenches as needed. It's worth noting that a default cluster storage (PVC) is automatically created with the same name as your workbench to save your work.

After creating the workbench, you will return to your project page. It shows the status of the workbench as shown below:

Notice that under the status indicator the workbench is Running. However, if any issues arise, such as an \"exceeded quota\" error, a red exclamation mark will appear under the Status indicator, as shown in the example below:

You can hover over that icon to view details. Upon closer inspection of the error message and the \"Event log\", you will receive details about the issue, enabling you to resolve it accordingly.

When your workbench is ready and the status changes to Running, you can select \"Open\" to access your environment:

How can I start or stop a Workbench?

You can use this \"toggle switch\" under the \"Status\" section to easily start/stop this environment later on.

"},{"location":"openshift-ai/get-started/rhoai-overview/","title":"Red Hat OpenShift AI (RHOAI) Overview","text":"

RHOAI offers a versatile and scalable MLOps solution equipped with tools for rapidly constructing, deploying, and overseeing AI-driven applications. Integrating the proven features of both Red Hat OpenShift AI and Red Hat OpenShift creates a comprehensive enterprise-grade artificial intelligence and machine learning (AI/ML) application platform, facilitating collaboration among data scientists, engineers, and app developers. This consolidated platform promotes consistency, security, and scalability, fostering seamless teamwork across disciplines and empowering teams to quickly explore, build, train, deploy, and test machine learning models, and to scale AI-enabled intelligent applications.

Formerly known as Red Hat OpenShift Data Science, OpenShift AI facilitates the complete journey of AI/ML experiments and models. OpenShift AI enables data acquisition and preparation, model training and fine-tuning, model serving and model monitoring, hardware acceleration, and distributed workloads using graphics processing unit (GPU) resources.

"},{"location":"openshift-ai/get-started/rhoai-overview/#ai-for-all","title":"AI for All","text":"

Recent enhancements to Red Hat OpenShift AI include:

  • Implementation of Deployment pipelines for monitoring AI/ML experiments and automating ML workflows accelerates the iteration process for data scientists and developers of intelligent applications. This integration facilitates swift iteration on machine learning projects and embeds automation into application deployment and updates.

  • Model serving now incorporates GPU assistance for inference tasks and custom model serving runtimes, enhancing inference performance and streamlining the deployment of foundational models.

  • With Model monitoring, organizations can oversee performance and operational metrics through a centralized dashboard, enhancing management capabilities.

"},{"location":"openshift-ai/get-started/rhoai-overview/#red-hat-openshift-ai-ecosystem","title":"Red Hat OpenShift AI ecosystem","text":"Name Description AI/ML modeling and visualization tools JupyterLab UI with prebuilt notebook images and common Python libraries and packages; TensorFlow; PyTorch, CUDA; and also support for custom notebook images Data engineering Support for different Data Engineering third party tools (optional) Data ingestion and storage Supports Amazon Simple Storage Service (S3) and NERC OpenStack Object Storage GPU support Available NVIDIA GPU Devices (with GPU operator): NVIDIA A100-SXM4-40GB and V100-PCIE-32GB Model serving and monitoring Model serving (KServe with user interface), model monitoring, OpenShift Source-to-Image (S2I), Red Hat OpenShift API Management (optional add-on), Intel Distribution of the OpenVINO toolkit Data science pipelines Data science pipelines (Kubeflow Pipelines) chain together processes like data preparation, build models, and serve models"},{"location":"openshift-ai/logging-in/access-the-rhoai-dashboard/","title":"Access the NERC's OpenShift AI dashboard","text":"

Access the NERC's OpenShift Web Console via the web browser as described here.

Make sure you are selecting \"mss-keycloak\" as shown here:

Once you successfully authenticate you should see the NERC OpenShift Web Console as shown below:

After logging in to the NERC OpenShift console, access the NERC's Red Hat OpenShift AI dashboard by clicking the application launcher icon (the black-and-white icon that looks like a grid), located on the header as shown below:

OpenShift AI uses the same credentials as OpenShift for the dashboard, notebooks, and all other components. When prompted, log in to the OpenShift AI dashboard using your OpenShift credentials by clicking the \"Log In With OpenShift\" button, as shown below:

After the NERC OpenShift AI dashboard launches, it displays all currently enabled applications.

You can return to OpenShift Web Console by using the application launcher icon (the black-and-white icon that looks like a grid), and choosing the \"OpenShift Console\" as shown below:

"},{"location":"openshift-ai/logging-in/the-rhoai-dashboard-overview/","title":"The NERC's OpenShift AI dashboard Overview","text":"

In the NERC's RHOAI dashboard, you can see multiple links on your left hand side.

  1. Applications:

    • Enabled: Launch your enabled applications, view documentation, or get started with quick start instructions and tasks.

    • Explore: View optional applications for your RHOAI instance.

      NOTE: Most of them are disabled by default on NERC RHOAI right now.

  2. Data Science Projects: View your existing projects. This will show different projects corresponding to your NERC-OCP (OpenShift) resource allocations. Here, you can choose specific projects corresponding to the appropriate allocation where you want to work. Within these projects, you can create workbenches, deploy various development environments (such as Jupyter Notebooks, VS Code, RStudio, etc.), add data connections, or serve models.

    What are Workbenches?

    Workbenches are development environments. They can be based on JupyterLab, but also on other types of IDEs, like VS Code or RStudio. You can create as many workbenches as you want, and they can run concurrently.

  3. Data Science Pipelines:

    • Pipelines: Manage your pipelines for a specific project selected from the dropdown menu.

    • Runs: Manage and view your runs for a specific project selected from the dropdown menu.

  4. Model Serving: Manage and view the health and performance of your deployed models across different projects corresponding to your NERC-OCP (OpenShift) resource allocations. Also, you can \"Deploy Model\" to a specific project selected from the dropdown menu here.

  5. Resources: Access all learning resources. This section showcases various tutorials and demos that help you onboard to the RHOAI platform.

"},{"location":"openshift-ai/other-projects/configure-jupyter-notebook-use-gpus-aiml-modeling/","title":"Configure a Jupyter notebook to use GPUs for AI/ML modeling","text":"

Prerequisites:

To prepare your Jupyter notebook server for using a GPU, you need to have:

  • Select the correct data science project and create a workbench; see Populate the data science project for more information.

Please ensure that you start your Jupyter notebook server with options as depicted in the following configuration screen. This screen provides you with the opportunity to select a notebook image and configure its options, including the number of GPUs.

For our example project, let's name it \"PyTorch Workbench\". We'll select the PyTorch image, choose a Deployment size of Small, Number of GPUs as 1 and allocate a Cluster storage space of 1GB.

If this procedure is successful, you have started your Jupyter notebook server. When your workbench is ready, the status will change to Running and you can select \"Open\" to go to your environment:

Once you successfully authenticate you should see the NERC RHOAI JupyterLab Web Interface as shown below:

It's pretty empty right now, though. On the left side of the navigation pane, locate the Name explorer panel. This panel is where you can create and manage your project directories.

"},{"location":"openshift-ai/other-projects/configure-jupyter-notebook-use-gpus-aiml-modeling/#clone-a-github-repository","title":"Clone a GitHub Repository","text":"

You can clone a Git repository in JupyterLab through the left-hand toolbar or the Git menu option in the main menu as shown below:

Let's clone a repository using the left-hand toolbar. Click on the Git icon, shown in below:

Then click on Clone a Repository as shown below:

Enter the git repository URL, which points to the end-to-end ML workflows demo project i.e. https://github.com/rh-aiservices-bu/getting-started-with-gpus.

Then click Clone button as shown below:

Cloning takes a few seconds, after which you can double-click and navigate to the newly-created folder i.e. getting-started-with-gpus that contains your cloned Git repository.

You will be able to find the newly-created folder named getting-started-with-gpus based on the Git repository name, as shown below:

"},{"location":"openshift-ai/other-projects/configure-jupyter-notebook-use-gpus-aiml-modeling/#exploring-the-getting-started-with-gpus-repository-contents","title":"Exploring the getting-started-with-gpus repository contents","text":"

After you've cloned your repository, the getting-started-with-gpus repository contents appear in a directory under the Name pane. The directory contains several notebooks as .ipynb files, along with a standard license and README file, as shown below:

Double-click the torch-use-gpu.ipynb file to open this notebook.

This notebook handles the following tasks:

  1. Importing torch libraries (utilities).

  2. Listing available GPUs.

  3. Checking that GPUs are enabled.

  4. Assigning a GPU device and retrieving the GPU name.

  5. Loading vectors, matrices, and data onto a GPU.

  6. Loading a neural network model onto a GPU.

  7. Training the neural network model.

Start by importing the various torch and torchvision utilities:

import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import TensorDataset\nimport torch.optim as optim\nimport torchvision\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n

Once the utilities are loaded, determine how many GPUs are available:

torch.cuda.is_available() # Do we have a GPU? Should return True.\n
torch.cuda.device_count()  # How many GPUs do we have access to?\n

When you have confirmed that a GPU device is available for use, assign a GPU device and retrieve the GPU name:

device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(device)  # Check which device we got\n
torch.cuda.get_device_name(0)\n

Once you have assigned the first GPU device to your device variable, you are ready to work with the GPU. Let's start working with the GPU by loading vectors, matrices, and data:

X_train = torch.IntTensor([0, 30, 50, 75, 70])  # Initialize a Tensor of Integers with no device specified\nprint(X_train.is_cuda, \",\", X_train.device)  # Check which device Tensor is created on\n
# Move the Tensor to the device we want to use\nX_train = X_train.cuda()\n# Alternative method: specify the device using the variable\n# X_train = X_train.to(device)\n# Confirm that the Tensor is on the GPU now\nprint(X_train.is_cuda, \",\", X_train.device)\n
# Alternative method: Initialize the Tensor directly on a specific device.\nX_test = torch.cuda.IntTensor([30, 40, 50], device=device)\nprint(X_test.is_cuda, \",\", X_test.device)\n

After you have loaded vectors, matrices, and data onto a GPU, load a neural network model:

# Here is a basic fully connected neural network built in Torch.\n# If we want to load it / train it on our GPU, we must first put it on the GPU\n# Otherwise it will remain on CPU by default.\n\nbatch_size = 100\n\nclass SimpleNet(nn.Module):\n    def __init__(self):\n        super(SimpleNet, self).__init__()\n        self.fc1 = nn.Linear(784, 784)\n        self.fc2 = nn.Linear(784, 10)\n\n    def forward(self, x):\n        x = x.view(batch_size, -1)\n        x = self.fc1(x)\n        x = F.relu(x)\n        x = self.fc2(x)\n        output = F.softmax(x, dim=1)\n        return output\n
model = SimpleNet().to(device)  # Load the neural network model onto the GPU\n

After the model has been loaded onto the GPU, train it on a data set. For this example, we will use the FashionMNIST data set:

\"\"\"\n    Data loading, train and test set via the PyTorch dataloader.\n\"\"\"\n# Transform our data into Tensors to normalize the data\ntrain_transform=transforms.Compose([\n        transforms.ToTensor(),\n        transforms.Normalize((0.1307,), (0.3081,))\n        ])\n\ntest_transform=transforms.Compose([\n        transforms.ToTensor(),\n        transforms.Normalize((0.1307,), (0.3081,)),\n        ])\n\n# Set up a training data set\ntrainset = datasets.FashionMNIST('./data', train=True, download=True,\n                  transform=train_transform)\ntrain_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,\n                                          shuffle=False, num_workers=2)\n\n# Set up a test data set\ntestset = datasets.FashionMNIST('./data', train=False,\n                  transform=test_transform)\ntest_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size,\n                                        shuffle=False, num_workers=2)\n

Once the FashionMNIST data set has been downloaded, you can take a look at the dictionary and sample its content.

# A dictionary to map our class numbers to their items.\nlabels_map = {\n    0: \"T-Shirt\",\n    1: \"Trouser\",\n    2: \"Pullover\",\n    3: \"Dress\",\n    4: \"Coat\",\n    5: \"Sandal\",\n    6: \"Shirt\",\n    7: \"Sneaker\",\n    8: \"Bag\",\n    9: \"Ankle Boot\",\n}\n\n# Plotting 9 random different items from the training data set, trainset.\nfigure = plt.figure(figsize=(8, 8))\nfor i in range(1, 3 * 3 + 1):\n    sample_idx = torch.randint(len(trainset), size=(1,)).item()\n    img, label = trainset[sample_idx]\n    figure.add_subplot(3, 3, i)\n    plt.title(labels_map[label])\n    plt.axis(\"off\")\n    plt.imshow(img.view(28,28), cmap=\"gray\")\nplt.show()\n

The following figure shows a few of the data set's pictures:

There are ten classes of fashion items (e.g. shirt, shoes, and so on). Our goal is to identify which class each picture falls into. Now you can train the model and determine how well it classifies the items:

def train(model, device, train_loader, optimizer, epoch):\n    \"\"\"Model training function\"\"\"\n    model.train()\n    print(device)\n    for batch_idx, (data, target) in tqdm(enumerate(train_loader)):\n        data, target = data.to(device), target.to(device)\n        optimizer.zero_grad()\n        output = model(data)\n        loss = F.nll_loss(output, target)\n        loss.backward()\n        optimizer.step()\n
def test(model, device, test_loader):\n    \"\"\"Model evaluating function\"\"\"\n    model.eval()\n    test_loss = 0\n    correct = 0\n    # Use the no_grad method to increase computation speed\n    # since computing the gradient is not necessary in this step.\n    with torch.no_grad():\n        for data, target in test_loader:\n            data, target = data.to(device), target.to(device)\n            output = model(data)\n            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss\n            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability\n            correct += pred.eq(target.view_as(pred)).sum().item()\n\n    test_loss /= len(test_loader.dataset)\n\n    print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n        test_loss, correct, len(test_loader.dataset),\n        100. * correct / len(test_loader.dataset)))\n
# number of  training 'epochs'\nEPOCHS = 5\n# our optimization strategy used in training.\noptimizer = optim.Adadelta(model.parameters(), lr=0.01)\n
for epoch in range(1, EPOCHS + 1):\n        print( f\"EPOCH: {epoch}\")\n        train(model, device, train_loader, optimizer, epoch)\n        test(model, device, test_loader)\n

As the model is trained, you can follow along as its accuracy increases from 63 to 72 percent. (Your accuracies might differ, because accuracy can depend on the random initialization of weights.)

Once the model is trained, save it locally:

# Saving the model's weights!\ntorch.save(model.state_dict(), \"mnist_fashion_SimpleNet.pt\")\n
"},{"location":"openshift-ai/other-projects/configure-jupyter-notebook-use-gpus-aiml-modeling/#load-and-run-a-pytorch-model","title":"Load and run a PyTorch model","text":"

Let's now determine how our simple torch model performs using GPU resources.

In the getting-started-with-gpus directory, double click on the torch-test-model.ipynb file (highlighted as shown below) to open the notebook.

After importing the torch and torchvision utilities, assign the first GPU to your device variable. Prepare to import your trained model, then place the model on your GPU and load in its trained weights:

import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import datasets\nimport torchvision.transforms as transforms\nimport matplotlib.pyplot as plt\n
device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nprint(device)  # let's see what device we got\n
# Getting set to import our trained model.\n\n# batch size of 1 so we can look at one image at time.\nbatch_size = 1\n\n\nclass SimpleNet(nn.Module):\n    def __init__(self):\n        super(SimpleNet, self).__init__()\n        self.fc1 = nn.Linear(784, 784)\n        self.fc2 = nn.Linear(784, 10)\n\n    def forward(self, x):\n        x = x.view(batch_size, -1)\n        x = self.fc1(x)\n        x = F.relu(x)\n        x = self.fc2(x)\n        output = F.softmax(x, dim=1)\n        return output\n
model = SimpleNet().to( device )\nmodel.load_state_dict( torch.load(\"mnist_fashion_SimpleNet.pt\") )\n

You are now ready to examine some data and determine how your model performs. The sample run shown below indicates that the model predicted a \"bag\" with a confidence of about 0.9192. Despite the % sign in the output, 0.9192 is very good, because a perfect confidence would be 1.0.
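
The kind of single-image prediction described above can be reproduced with a short snippet like the following. This is only a sketch, assuming the testset, labels_map, model, and device objects defined in the earlier notebook cells; it is not part of the cloned repository.

# Sketch: predict one test image and report the model's confidence.\n# Assumes testset, labels_map, model and device from the earlier cells (batch_size = 1).\nimport torch\n\nmodel.eval()\nimg, label = testset[0]  # one FashionMNIST image and its true label\nwith torch.no_grad():\n    probs = model(img.unsqueeze(0).to(device))  # forward pass; output is already softmax-ed\n    confidence, predicted = torch.max(probs, dim=1)\nprint(f\"Predicted: {labels_map[predicted.item()]} (confidence {confidence.item():.4f}), actual: {labels_map[label]}\")\n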

"},{"location":"openshift-ai/other-projects/how-access-s3-data-then-download-and-analyze-it/","title":"How to access, download, and analyze data for S3 usage","text":"

Prerequisites:

To prepare your Jupyter notebook server, you need to have:

  • Select the correct data science project and create a workbench, see Populate the data science project for more information.

Please ensure that you start your Jupyter notebook server with options as depicted in the following configuration screen. This screen provides you with the opportunity to select a notebook image and configure its options, including the number of GPUs.

For our example project, let's name it \"Standard Data Science Workbench\". We'll select the Standard Data Science image, choose a Deployment size of Small, Number of GPUs as 0 and allocate a Cluster storage space of 1GB.

If this procedure is successful, you have started your Jupyter notebook server. When your workbench is ready, the status will change to Running and you can select \"Open\" to go to your environment:

Once you successfully authenticate you should see the NERC RHOAI JupyterLab Web Interface as shown below:

It's pretty empty right now, though. On the left side of the navigation pane, locate the Name explorer panel. This panel is where you can create and manage your project directories.

"},{"location":"openshift-ai/other-projects/how-access-s3-data-then-download-and-analyze-it/#clone-a-github-repository","title":"Clone a GitHub Repository","text":"

You can clone a Git repository in JupyterLab through the left-hand toolbar or the Git menu option in the main menu as shown below:

Let's clone a repository using the left-hand toolbar. Click on the Git icon, shown in below:

Then click on Clone a Repository as shown below:

Enter the git repository URL, which points to the end-to-end ML workflows demo project i.e. https://github.com/rh-aiservices-bu/access-s3-data.

Then click the Clone button as shown below:

Cloning takes a few seconds, after which you can double-click and navigate to the newly-created folder i.e. access-s3-data that contains your cloned Git repository.

You will be able to find the newly-created folder named access-s3-data based on the Git repository name, as shown below:

"},{"location":"openshift-ai/other-projects/how-access-s3-data-then-download-and-analyze-it/#access-and-download-s3-data","title":"Access and download S3 data","text":"

In the Name menu, double-click the downloadData.ipynb notebook in the file explorer on the left side to launch it. This action will open another tab in the content section of the environment, on the right.

Run each cell in the notebook, using the Shift-Enter key combination, and pay attention to the execution results. Using this notebook, we will (see the sketch after this list):

  • Make a connection to an AWS S3 storage bucket

  • Download a CSV file into the \"datasets\" folder

  • Rename the downloaded CSV file to \"newtruckdata.csv\"
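
Roughly, those steps correspond to a snippet like the sketch below. It is illustrative only and not the notebook's exact code; the bucket name, object key, and credential environment variables are placeholders.

# Illustrative sketch of the downloadData.ipynb workflow (bucket, key and credentials are placeholders).\nimport os\nimport boto3\n\ns3 = boto3.client(\n    \"s3\",\n    aws_access_key_id=os.environ[\"AWS_ACCESS_KEY_ID\"],\n    aws_secret_access_key=os.environ[\"AWS_SECRET_ACCESS_KEY\"],\n)\n\nos.makedirs(\"datasets\", exist_ok=True)\n# Download a CSV file into the \"datasets\" folder ...\ns3.download_file(\"example-bucket\", \"truckdata.csv\", \"datasets/truckdata.csv\")\n# ... and rename it to \"newtruckdata.csv\"\nos.rename(\"datasets/truckdata.csv\", \"datasets/newtruckdata.csv\")\n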

"},{"location":"openshift-ai/other-projects/how-access-s3-data-then-download-and-analyze-it/#view-your-new-csv-file","title":"View your new CSV file","text":"

Inside the \"datasets\" directory, double-click the \"newtruckdata.csv\" file. File contents should appear as shown below:

The file contains the data you will analyze and perform some analytics on.

"},{"location":"openshift-ai/other-projects/how-access-s3-data-then-download-and-analyze-it/#getting-ready-to-run-analysis-on-your-new-csv-file","title":"Getting ready to run analysis on your new CSV file","text":"

Since you now have data, you can open the next Jupyter notebook, simpleCalc.ipynb, and perform the following operations (sketched in code after this list):

  • Create a dataframe.

  • Perform simple total and average calculations.

  • Print the calculation results.
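
Those operations amount to something like the following sketch (the actual notebook code may differ, and the column name used here is a placeholder):

# Sketch of the simple total/average calculation (the column name is a placeholder).\nimport pandas as pd\n\ndf = pd.read_csv(\"datasets/newtruckdata.csv\")    # create a dataframe from the downloaded CSV\ntotal_mileage = df[\"mileage\"].sum()              # e.g. 742 for the four sample vehicles\nvehicle_count = len(df)                          # e.g. 4\naverage_mileage = total_mileage / vehicle_count  # e.g. 742 / 4 = 185.5\nprint(f\"Total: {total_mileage}, Vehicles: {vehicle_count}, Average: {average_mileage}\")\n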

"},{"location":"openshift-ai/other-projects/how-access-s3-data-then-download-and-analyze-it/#analyzing-your-s3-data-access-run-results","title":"Analyzing your S3 data access run results","text":"

Double-click the simpleCalc.ipynb Python file. When you execute the cells in the notebook, results appear like the ones shown below:

The cells in the above figure show the mileage of four vehicles. In the next cell, we calculate total mileage, total rows (number of vehicles) and the average mileage for all vehicles. Execute the \"Perform Calculations\" cell to see basic calculations performed on the data as shown below:

Calculations show the total mileage as 742, for four vehicles, and an average mileage of 185.5.

Success! You have now analyzed your run results using the NERC RHOAI.

"},{"location":"openstack/","title":"OpenStack Tutorial Index","text":"

If you're just starting out, we recommend starting from

Access the OpenStack Dashboard and going through the tutorial in order.

If you just need to review a specific step, you can find the page you need in the list below.

"},{"location":"openstack/#logging-in","title":"Logging In","text":"
  • Access the OpenStack Dashboard <<-- Start Here
  • Dashboard Overview
"},{"location":"openstack/#access-and-security","title":"Access and Security","text":"
  • Security Groups
  • Create a Key Pair
"},{"location":"openstack/#create-connect-to-the-vm","title":"Create & Connect to the VM","text":"
  • Launch a VM
  • Create a Windows VM
  • Available Images
  • Available NOVA Flavors
  • Assign a Floating IP
  • SSH to the VM
"},{"location":"openstack/#openstack-cli","title":"OpenStack CLI","text":"
  • OpenStack CLI
  • Launch a VM using OpenStack CLI
"},{"location":"openstack/#persistent-storage","title":"Persistent Storage","text":""},{"location":"openstack/#block-storage-volumes-cinder","title":"Block Storage/ Volumes/ Cinder","text":"
  • Block Storage/ Volumes/ Cinder
  • Create an empty volume
  • Attach the volume to an instance
  • Format and Mount the Volume
  • Detach a Volume
  • Delete Volumes
  • Extending Volume
  • Transfer a Volume
"},{"location":"openstack/#object-storage-swift","title":"Object Storage/ Swift","text":"
  • Object Storage/ Swift
  • Mount The Object Storage
"},{"location":"openstack/#data-transfer","title":"Data Transfer","text":"
  • Data Transfer To/ From NERC VM
"},{"location":"openstack/#backup-your-instance-and-data","title":"Backup your instance and data","text":"
  • Backup with snapshots
"},{"location":"openstack/#vm-management","title":"VM Management","text":"
  • VM Management
"},{"location":"openstack/#decommission-openstack-resources","title":"Decommission OpenStack Resources","text":"
  • Decommission OpenStack Resources
"},{"location":"openstack/#advanced-openstack-topics","title":"Advanced OpenStack Topics","text":""},{"location":"openstack/#setting-up-your-own-network","title":"Setting Up Your Own Network","text":"
  • Set up your own Private Network
  • Create a Router
"},{"location":"openstack/#domain-or-host-name-for-your-vm","title":"Domain or Host Name for your VM","text":"
  • Domain Name System (DNS)
"},{"location":"openstack/#using-terraform-to-provision-nerc-resources","title":"Using Terraform to provision NERC resources","text":"
  • Terraform on NERC
"},{"location":"openstack/#python-sdk","title":"Python SDK","text":"
  • Python SDK
"},{"location":"openstack/#setting-up-your-own-images","title":"Setting Up Your Own Images","text":"
  • Microsoft Windows image
"},{"location":"openstack/access-and-security/create-a-key-pair/","title":"Create a Key-pair","text":"

NOTE

If you will be using PuTTY on Windows, please read this first.

"},{"location":"openstack/access-and-security/create-a-key-pair/#add-a-key-pair","title":"Add a Key Pair","text":"

For security, the VM images have password authentication disabled by default, so you will need to use an SSH key pair to log in.

You can view key pairs by clicking Project, then click Compute panel and choose Key Pairs from the tabs that appears. This shows the key pairs that are available for this project.

"},{"location":"openstack/access-and-security/create-a-key-pair/#generate-a-key-pair","title":"Generate a Key Pair","text":"

Prerequisite

You need ssh installed in your system.

You can create a key pair on your local machine, then upload the public key to the cloud. This is the recommended method.

Open a terminal and type the following commands (in this example, we have named the key cloud.key, but you can name it anything you want):

cd ~/.ssh\nssh-keygen -t rsa -f ~/.ssh/cloud.key -C \"label_your_key\"\n

Example:

You will be prompted to create a passphrase for the key. IMPORTANT: Do not forget the passphrase! If you do, you will be unable to use your key.

This process creates two files in your .ssh folder:

cloud.key      # private key - don\u2019t share this with anyone, and never upload\n# it anywhere ever\ncloud.key.pub  # this is your public key\n

Pro Tip

The -C \"label\" field is not required, but it is useful to quickly identify different public keys later.

You could use your email address as the label, or a user@host tag that identifies the computer the key is for.

For example, if Bob has both a laptop and a desktop computer that he will use, he might use -C \"Bob@laptop\" to label the key he generates on the laptop, and -C \"Bob@desktop\" for the desktop.

On your terminal:

pbcopy < ~/.ssh/cloud.key.pub  #copies the contents of public key to your clipboard\n

Pro Tip

If pbcopy isn't working, you can locate the hidden .ssh folder, open the file in your favorite text editor, and copy it to your clipboard.

"},{"location":"openstack/access-and-security/create-a-key-pair/#import-the-generated-key-pair","title":"Import the generated Key Pair","text":"

Now that you have created your keypair in ~/.ssh/cloud.key.pub, you can upload it to OpenStack by using either the Horizon dashboard or the OpenStack CLI as described below:

"},{"location":"openstack/access-and-security/create-a-key-pair/#1-using-nercs-horizon-dashboard","title":"1. Using NERC's Horizon dashboard","text":"

Go back to the Openstack Dashboard, where you should still be on the Key Pairs tab

(If not, find it under Project -> Compute -> Key Pairs)

Choose \"Import Public Key\". Give the key a name in the \"Key Pair Name\" Box, choose \"SSH Key\" as the Key Type dropdown option and paste the public key that you just copied in the \"Public Key\" text box.

Click \"Import Public Key\". You will see your key pair appear in the list.

You can now skip ahead to Adding the key to an ssh-agent.

"},{"location":"openstack/access-and-security/create-a-key-pair/#2-using-the-openstack-cli","title":"2. Using the OpenStack CLI","text":"

Prerequisites:

To run the OpenStack CLI commands, you need to have:

  • OpenStack CLI setup, see OpenStack Command Line setup for more information.

To create OpenStack keypair using the CLI, do this:

"},{"location":"openstack/access-and-security/create-a-key-pair/#using-the-openstack-client-commands","title":"Using the openstack client commands","text":"

Now that you have created your keypair in ~/.ssh/cloud.key.pub, you can upload it to OpenStack with name \"my-key\" as follows:

openstack keypair create --public-key ~/.ssh/cloud.key.pub my-key\n+-------------+-------------------------------------------------+\n| Field       | Value                                           |\n+-------------+-------------------------------------------------+\n| created_at  | None                                            |\n| fingerprint | 1c:40:db:ea:82:c2:c3:05:58:81:84:4b:e3:4f:c2:a1 |\n| id          | my-key                                          |\n| is_deleted  | None                                            |\n| name        | my-key                                          |\n| type        | ssh                                             |\n| user_id     | 938eb8bfc72e4ca3ad2c94e2eb4059f7                |\n+-------------+-------------------------------------------------+\n
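
If you prefer to script this step in Python rather than using the CLI, the OpenStack Python SDK (introduced later in this guide) offers an equivalent call. The following is a minimal sketch, assuming your NERC OpenStack RC file has already been sourced:

# Sketch: upload the same public key using the OpenStack Python SDK.\n# Assumes the OpenStack RC file has been sourced (OS_* environment variables set).\nfrom pathlib import Path\n\nimport openstack\n\nconn = openstack.connect()\npublic_key = Path.home().joinpath(\".ssh\", \"cloud.key.pub\").read_text()\nkeypair = conn.compute.create_keypair(name=\"my-key\", public_key=public_key)\nprint(keypair.fingerprint)\n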
"},{"location":"openstack/access-and-security/create-a-key-pair/#create-a-key-pair-using-horizon-dashboard","title":"Create a Key Pair using Horizon dashboard","text":"

Alternatively, if you are having trouble creating and importing a key pair with the instructions above, the Openstack Horizon dashboard can make one for you.

Click \"Create a Key Pair\", and enter a name for the key pair.

Click on \"Create a Key Pair\" button. You will be prompted to download a .pem file containing your private key.

In the example, we have named the key 'cloud.pem', but you can name it anything.

Save this file to your hard drive, for example in your Downloads folder.

Copy this key inside the .ssh folder on your local machine/laptop, using the following steps:

cd ~/Downloads          # Navigate to the folder where you saved the .pem file\nmv cloud.pem ~/.ssh/    # This command will copy the key you downloaded to\n# your .ssh folder.\ncd ~/.ssh               # Navigate to your .ssh folder\nchmod 400 cloud.pem     # Change the permissions of the file\n

To see your public key, navigate to Project -> Compute -> Key Pairs

You should see your key in the list.

If you click on the name of the newly added key, you will see a screen of information that includes details about your public key:

The public key is the part of the key you distribute to VMs and remote servers.

You may find it convenient to paste it into a file inside your .ssh folder, so you don't always need to log into the website to see it.

Call the file something like cloud_key.pub to distinguish it from your private key.

Very Important: Security Best Practice

Never share your private key with anyone, or upload it to a server!

"},{"location":"openstack/access-and-security/create-a-key-pair/#adding-your-ssh-key-to-the-ssh-agent","title":"Adding your SSH key to the ssh-agent","text":"

If you have many VMs, you will most likely be using one or two VMs with public IPs as a gateway to others which are not reachable from the internet.

In order to be able to use your key for multiple SSH hops, do NOT copy your private key to the gateway VM!

The correct method is to use Agent Forwarding, which adds the key to an ssh-agent on your local machine and 'forwards' it over the SSH connection.

If ssh-agent is not already running, you need to start it in the background.

eval \"$(ssh-agent -s)\"\n> Agent pid 59566\n

Then, add the key to your ssh agent:

cd ~/.ssh\nssh-add cloud.key\nIdentity added: cloud.key (test_user@laptop)\n

Check that it is added with the command

ssh-add -l\n2048 SHA256:D0DLuODzs15j2OaZnA8I52aEeY3exRT2PCsUyAXgI24 test_user@laptop (RSA)\n

Depending on your system, you might have to repeat these steps after you reboot or log out of your computer.

You can always check if your ssh key is added by running the ssh-add -l command.

A key with the default name id_rsa will be added by default at login, although you will still need to unlock it with your passphrase the first time you use it.

Once the key is added, you will be able to forward it over an SSH connection, like this:

ssh -A -i cloud.key <username>@<remote-host-IP>\n

Connecting via SSH is discussed in more detail later in the tutorial (SSH to Cloud VM); for now, just proceed to the next step below.

"},{"location":"openstack/access-and-security/create-a-key-pair/#ssh-keys-with-putty-on-windows","title":"SSH keys with PuTTY on Windows","text":"

PuTTY requires SSH keys to be in its own ppk format. To convert between OpenSSH keys used by OpenStack and PuTTY's format, you need a utility called PuTTYgen.

If it was not installed when you originally installed PuTTY, you can get it here: Download PuTTY.

You have 2 options for generating keys that will work with PuTTY:

  1. Generate an OpenSSH key with ssh-keygen or from the Horizon GUI using the instructions above, then use PuTTYgen to convert the private key to .ppk

  2. Generate a .ppk key with PuTTYgen, and import the provided OpenSSH public key to OpenStack using the 'Import the generated Key Pair' instructions above.

There is a detailed walkthrough of how to use PuTTYgen here: Use SSH Keys with PuTTY on Windows.

"},{"location":"openstack/access-and-security/security-groups/","title":"Security Groups","text":"

Security groups can be thought of like firewalls. They ultimately control inbound and outbound traffic to your virtual machines.

Before you launch an instance, you should add security group rules to enable users to ping and use SSH to connect to the instance. Security groups are sets of IP filter rules that define networking access and are applied to all instances within a project. To do so, you can either add rules to the default security group (Add a rule to the default security group) or add a new security group with rules.

You can view security groups by clicking Project, then click Network panel and choose Security Groups from the tabs that appears.

Navigate to Project -> Network -> Security Groups.

You should see a \u2018default\u2019 security group. The default security group allows traffic only between members of the security group, so by default you can always connect between VMs in this group. However, it blocks all traffic from outside, including incoming SSH connections. In order to access instances via a public IP, an additional security group is needed. On the other hand, for a VM that hosts a web server, you need a security group which allows access to ports 80 (for HTTP) and 443 (for HTTPS).

Important Note

We strongly advise against altering the default security group and suggest refraining from adding extra security rules to it. This is because the default security group is automatically assigned to any newly created VMs. It is considered a best practice to create separate security groups for related services, as these groups can be reused multiple times. Security groups are highly configurable; for instance, you might create a basic/generic group for SSH (port 22) and ICMP (which is what we will show as an example here) and then a separate security group for HTTP (port 80) and HTTPS (port 443) access if you're running a web service on your instance.

You can also limit access based on where the traffic originates, using either IP addresses or security groups to define the allowed sources.

"},{"location":"openstack/access-and-security/security-groups/#create-a-new-security-group","title":"Create a new Security Group","text":""},{"location":"openstack/access-and-security/security-groups/#allowing-ssh","title":"Allowing SSH","text":"

To allow access to your VM for things like SSH, you will need to create a security group and add rules to it.

Click on \"Create Security Group\". Give your new group a name, and a brief description.

You will see some existing rules:

Let's create the new rule to allow SSH. Click on \"Add Rule\".

You will see there are a lot of options you can configure on the Add Rule dialog box.

To check all available Rule

You can choose the desired rule template as shown under Rule dropdown options. This will automatically select the Port required for the selected custom rule.

Enter the following values:

  • Rule: SSH

  • Remote: CIDR

  • CIDR: 0.0.0.0/0

    Note

    To accept requests from a particular range of IP addresses, specify the IP address block in the CIDR box.

The new rule now appears in the list. This signifies that any instances using this newly added Security Group will now have SSH port 22 open for requests from any IP address.
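
The same SSH rule can also be created programmatically, for example with the OpenStack Python SDK covered later in this guide. The following is only a sketch: the security group name is a placeholder, and the protocol/ports can be adjusted for ICMP, HTTP (80), HTTPS (443), RDP (3389) and so on.

# Sketch: add an SSH (TCP port 22) ingress rule to an existing security group.\n# \"my-ssh-group\" is a placeholder name; assumes your OpenStack RC file has been sourced.\nimport openstack\n\nconn = openstack.connect()\ngroup = conn.network.find_security_group(\"my-ssh-group\")\nconn.network.create_security_group_rule(\n    security_group_id=group.id,\n    direction=\"ingress\",\n    ethertype=\"IPv4\",\n    protocol=\"tcp\",\n    port_range_min=22,\n    port_range_max=22,\n    remote_ip_prefix=\"0.0.0.0/0\",\n)\n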

"},{"location":"openstack/access-and-security/security-groups/#allowing-ping","title":"Allowing Ping","text":"

The default configuration blocks ping responses, so you will need to add an additional group and/or rule if you want your public IPs to respond to ping requests.

Ping is ICMP traffic, so the easiest way to allow it is to add a new rule and choose \"ALL ICMP\" from the dropdown.

In the Add Rule dialog box, enter the following values:

  • Rule: All ICMP

  • Direction: Ingress

  • Remote: CIDR

  • CIDR: 0.0.0.0/0

Instances will now accept all incoming ICMP packets.

"},{"location":"openstack/access-and-security/security-groups/#allowing-rdp","title":"Allowing RDP","text":"

To allow access to your VM for things like Remote Desktop Protocol (RDP), you will need to create a security group and add rules to it.

Click on \"Create Security Group\". Give your new group a name, and a brief description.

You will see some existing rules:

Let's create the new rule to allow RDP. Click on \"Add Rule\".

You will see there are a lot of options you can configure on the Add Rule dialog box.

Choose \"RDP\" from the Rule dropdown option as shown below:

Enter the following values:

  • Rule: RDP

  • Remote: CIDR

  • CIDR: 0.0.0.0/0

Note

To accept requests from a particular range of IP addresses, specify the IP address block in the CIDR box.

The new rule now appears in the list. This signifies that any instances using this newly added Security Group will now have RDP port 3389 open for requests from any IP address.

"},{"location":"openstack/access-and-security/security-groups/#editing-existing-security-group-and-adding-new-security-rules","title":"Editing Existing Security Group and Adding New Security Rules","text":"
  • Navigate to Security Groups:

    Navigate to Project -> Network -> Security Groups.

  • Select the Security Group:

    Choose the security group to which you want to add new rules.

  • Add New Rule:

    Look for an option to add a new rule within the selected security group.

    Specify the protocol, port range, and source/destination details for the new rule.

  • Save Changes:

    Save the changes to apply the new security rules to the selected security group.

Important Note

Security group changes may take some time to propagate to the instances associated with the modified group. Ensure that new rules align with your network security requirements.

"},{"location":"openstack/access-and-security/security-groups/#update-security-groups-to-a-running-vm","title":"Update Security Group(s) to a running VM","text":"

If you want to attach/detach any new Security Group(s) to a running VM after it was launched, first create all new Security Group(s) with the required rules as described here. Note that the same Security Groups can be used by multiple VMs, so don't create duplicate or redundant Security Groups, as there are quotas per project. Once you have created all the Security Groups, you can easily attach them to any existing VM(s). You can select the VM from the Compute -> Instances tab and then select \"Edit Security Groups\" as shown below:

Then select all Security Group(s) that you want to attach to this VM by clicking on \"+\" icon and then click \"Save\" as shown here:
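
If you manage your resources from scripts, the same attachment can also be done outside Horizon. Here is a minimal sketch with the OpenStack Python SDK, where the server and security group names are placeholders:

# Sketch: attach an existing Security Group to a running VM.\n# \"my-vm\" and \"web-ports\" are placeholder names; assumes your OpenStack RC file has been sourced.\nimport openstack\n\nconn = openstack.connect()\nserver = conn.compute.find_server(\"my-vm\")\ngroup = conn.network.find_security_group(\"web-ports\")\nconn.compute.add_security_group_to_server(server, group)\n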

"},{"location":"openstack/advanced-openstack-topics/domain-name-system/domain-names-for-your-vms/","title":"DNS services in NERC OpenStack","text":""},{"location":"openstack/advanced-openstack-topics/domain-name-system/domain-names-for-your-vms/#what-is-dns","title":"What is DNS?","text":"

The Domain Name System (DNS) is a hierarchical and distributed system for naming resources connected to a network, and works by storing various types of records, such as an IP address associated with a domain name.

DNS simplifies the communication between computers and servers through a network and provides a user-friendly method for users to interact with and get the desired information.

"},{"location":"openstack/advanced-openstack-topics/domain-name-system/domain-names-for-your-vms/#how-to-get-user-friendly-domain-names-for-your-nerc-vms","title":"How to get user-friendly domain names for your NERC VMs?","text":"

NERC does not currently offer integrated domain name service management.

You can use one of the following methods to configure name resolution (DNS) for your NERC's virtual instances.

"},{"location":"openstack/advanced-openstack-topics/domain-name-system/domain-names-for-your-vms/#1-using-freely-available-free-dynamic-dns-services","title":"1. Using freely available free Dynamic DNS services","text":"

Get a free domain or host name from no-ip.com or other

free Dynamic DNS services.

Here we will describe how to use No-IP to configure dynamic DNS.

Step 1: Create your No-IP Account.

During this process you can add your desired unique hostname with a pre-existing domain name, or you can choose to create your hostname later on.

Step 2: Confirm Your Account by verifying your email address.

Step 3: Log In to Your Account to view your dashboard.

Step 4: Add Floating IP of your instance to the Hostname.

Click on \"Modify\" to add your own Floating IP attached to your NERC virtual instance.

Then, browse to your host or domain name as set up during registration or later, i.e. http://nerc.hopto.org in the above example.

An easy video tutorial can be found here.

Having a free option is great for quickly demonstrating your project, but it has the following restrictions:

"},{"location":"openstack/advanced-openstack-topics/domain-name-system/domain-names-for-your-vms/#2-using-nginx-proxy-manager","title":"2. Using Nginx Proxy Manager","text":"

You can setup Nginx Proxy Manager on one of your NERC VMs and then use this Nginx Proxy Manager as your gateway to forward to your other web based services.

"},{"location":"openstack/advanced-openstack-topics/domain-name-system/domain-names-for-your-vms/#quick-setup","title":"Quick Setup","text":"

i. Launch a VM with a security group that has open rules for ports 80, 443, and 22 to enable SSH Port Forwarding, aka SSH Tunneling, i.e. Local Port Forwarding into the VM.

ii. SSH into your VM using your private key after attaching a Floating IP.

iii. Install Docker and Docker-Compose based on your OS choice for your VM.

iv. Create a docker-compose.yml file similar to this:

version: \"3\"\nservices:\n    app:\n        image: \"jc21/nginx-proxy-manager:latest\"\n        restart: unless-stopped\n        ports:\n            - \"80:80\"\n            - \"81:81\"\n            - \"443:443\"\n        volumes:\n            - ./data:/data\n            - ./letsencrypt:/etc/letsencrypt\n

v. Bring up your stack by running:

docker-compose up -d\n\n# If using docker-compose-plugin\ndocker compose up -d\n

vi. Once the docker container runs successfully, connect to it on the Admin Web Port, i.e. 81, opened for the admin interface via SSH Tunneling, i.e. Local Port Forwarding, from your local machine's terminal by running:

ssh -N -L <Your_Preferred_Port>:localhost:81 <User>@<Floating-IP> -i <Path_To_Your_Private_Key>

Here, you can choose any port that is available on your machine as <Your_Preferred_Port>, the VM's assigned Floating IP as <Floating-IP>, and the associated private key attached to the VM as <Path_To_Your_Private_Key>.

For e.g. ssh -N -L 8081:localhost:81 ubuntu@199.94.60.24 -i ~/.ssh/cloud.key

vii. Once the SSH Tunneling is successful, log in to the Nginx Proxy Manager Admin UI on your web browser: http://localhost:<Your_Preferred_Port> i.e. http://localhost:8081

Information

It may take some time to spin up the Admin UI. Your terminal running the SSH Tunneling, i.e. Local Port Forwarding, will not show any logs or output when successfully done. Also, you should not close or terminate the terminal while running the tunneling session and using the Admin UI.

Default Admin User:

Email:    admin@example.com\nPassword: changeme\n

Immediately after logging in with this default user you will be asked to modify your admin details and change your password.

"},{"location":"openstack/advanced-openstack-topics/domain-name-system/domain-names-for-your-vms/#how-to-create-a-proxy-host-with-lets-encrypt-ssl-certificate-attached-to-it","title":"How to create a Proxy Host with Let's Encrypt SSL Certificate attached to it","text":"

i. Click on Hosts >> Proxy Hosts, then click on \"Add Proxy Host\" button as shown below:

ii. On the popup box, enter your Domain Names (these need to be registered through your research institution or purchased from other third-party vendor services, and you must have administrative access to them)

Important Note

The Domain Name needs to have an A Record pointing to the public floating IP of your NERC VM where you are hosting the Nginx Proxy Manager!

Please fill out the following information on this popup box:

  • Scheme: http

  • Forward Hostname/IP: <The Private-IP of your NERC VM where you are hosting the web services>

  • Forward Port: <Port exposed on your VM to the public>

  • Enable all toggles i.e. Cache Assets, Block Common Exploits, Websockets Support

  • Access List: Publicly Accessible

For your reference, your selection should look like the one below, with your own Domain Name and other settings:

Also, select the SSL tab and then \"Request a new SSL Certificate\" with settings as shown below:

iii. Once saved by clicking the \"Save\" button, it should show the Status as \"Online\", and when you click on the created Proxy Host link it will load the web service over https with the domain name you defined, i.e. https://<Your-Domain-Name>.

"},{"location":"openstack/advanced-openstack-topics/domain-name-system/domain-names-for-your-vms/#3-using-your-local-research-computing-rc-department-or-academic-institutions-central-it-services","title":"3. Using your local Research Computing (RC) department or academic institution's Central IT services","text":"

You need to contact and work with your Research Computing department or academic institution's Central IT services to create an A record for your hostname that maps to the Floating IP of your NERC virtual instance.

A record: The primary DNS record used to connect your domain to an IP address that directs visitors to your website.
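
Once the A record is in place, you can verify that the hostname resolves to your VM's Floating IP, for example with a few lines of Python (the hostname below is a placeholder):

# Sketch: confirm that a hostname's A record points at your VM's Floating IP.\n# \"myvm.example.edu\" is a placeholder hostname.\nimport socket\n\nresolved_ip = socket.gethostbyname(\"myvm.example.edu\")\nprint(resolved_ip)  # should match the Floating IP attached to your NERC instance\n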

"},{"location":"openstack/advanced-openstack-topics/domain-name-system/domain-names-for-your-vms/#4-using-commercial-dns-providers","title":"4. Using commercial DNS providers","text":"

Alternatively, you can purchase a fully registered domain name or host name from commercial hosting providers and then register DNS records for your virtual instance with commercial cloud services i.e. AWS Route53, Azure DNS, CloudFlare, Google Cloud Platform, GoDaddy, etc.

"},{"location":"openstack/advanced-openstack-topics/python-sdk/python-SDK/","title":"References","text":"

Python SDK page at PyPi

OpenStack Python SDK User Guide

From the Python SDK page at Pypi:

Definition

Python SDK is a client library for building applications to work with OpenStack clouds. The project aims to provide a consistent and complete set of interactions with OpenStack's many services, along with complete documentation, examples, and tools.

If you need to plug OpenStack into existing scripts using another language, there are a variety of other SDKs at various levels of active development.

A list of known SDKs is maintained on the official OpenStack wiki. Known SDKs
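
As a quick taste of the SDK, a minimal sketch that connects using the credentials from your sourced NERC OpenStack RC file and lists the servers in your project could look like this:

# Minimal OpenStack Python SDK sketch: connect and list the servers in your project.\n# Assumes your NERC OpenStack RC file has been sourced (OS_* environment variables set).\nimport openstack\n\nconn = openstack.connect()\nfor server in conn.compute.servers():\n    print(server.name, server.status)\n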

"},{"location":"openstack/advanced-openstack-topics/setting-up-a-network/create-a-router/","title":"Create a Router","text":"

A router acts as a gateway for external connectivity.

By connecting your private network to the public network via a router, you can connect your instance to the Internet, install packages, etc. without needing to associate it with a public IP address.

You can view routers by clicking Project, then click Network panel and choose Routers from the tabs that appears.

Click \"Create Network\" button on the right side of the screen.

In the Create Router dialog box, specify a name for the router.

From the External Network dropdown, select the \u2018provider\u2019 network, and click \"Create Router\" button. This will set the Gateway for the new router to public network.

The new router is now displayed in the Routers tab. You should now see the router in the Network Topology view. (It also appears under Project -> Network -> Routers).

Notice that it is now connected to the public network, but not your private network.

"},{"location":"openstack/advanced-openstack-topics/setting-up-a-network/create-a-router/#set-internal-interface-on-the-router","title":"Set Internal Interface on the Router","text":"

In order to route between your private network and the outside world, you must give the router an interface on your private network.

Perform the following steps in order to connect a private network to the newly created router:

a. On the Routers tab, click the name of the router.

b. On the Router Details page, click the Interfaces tab, then click Add Interface.

c. In the Add Interface dialog box, select a Subnet.

Optionally, in the Add Interface dialog box, set an IP Address for the router interface for the selected subnet.

If you choose not to set the IP Address value, then by default OpenStack Networking uses the first host IP address in the subnet.

The Router Name and Router ID fields are automatically updated.

d. Click \"Add Interface\".

The Router will now appear connected to the private network in Network Topology tab.

OR,

Alternatively, you can set the Internal Interface on the Router from the Network Topology view: click on the router you just created, and click \u2018Add Interface\u2019 on the popup that appears.

This will show the Add Interface dialog box, so you just complete steps b to c as mentioned above.
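
For completeness, the same router setup can also be scripted. The following is a sketch using the OpenStack Python SDK, where the router and subnet names are placeholders and the external network is the 'provider' network mentioned above:

# Sketch: create a router with its gateway on the public (\"provider\") network\n# and attach it to an existing private subnet. Names are placeholders;\n# assumes your OpenStack RC file has been sourced.\nimport openstack\n\nconn = openstack.connect()\nexternal_net = conn.network.find_network(\"provider\")\nsubnet = conn.network.find_subnet(\"my-private-subnet\")\n\nrouter = conn.network.create_router(\n    name=\"my-router\",\n    external_gateway_info={\"network_id\": external_net.id},\n)\nconn.network.add_interface_to_router(router, subnet_id=subnet.id)\n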

"},{"location":"openstack/advanced-openstack-topics/setting-up-a-network/set-up-a-private-network/","title":"Set up a Private Network","text":"

Default Network for your Project

During your project setup, NERC will set up a default network, router and interface for your project that is ready to use.

"},{"location":"openstack/advanced-openstack-topics/setting-up-a-network/set-up-a-private-network/#create-your-own-private-network","title":"Create Your Own Private Network","text":"

You can view your existing network topology or create a new one by clicking Project, then click the Network panel and choose Network Topology from the tabs that appear. This shows the public network, which is accessible to all projects.

Click on \"Networks\" tab and then click \"Create Network\" button on the right side of the screen.

In the Create Network dialog box, specify the following values.

  • Network tab:

    Network Name: Specify a name to identify the network.

    Admin State: The state to start the network in.

    Create Subnet: Select this check box to create a subnet

    Give your network a name, and leave the two checkboxes for \"Admin State\" and \"Create Subnet\" with the default settings.

  • Subnet tab:

    You do not have to specify a subnet when you create a network, but if you do not specify a subnet, the network can not be attached to an instance.

    Subnet Name: Specify a name for the subnet.

    Network Address: Specify the IP address for the subnet. For your private networks, you should use IP addresses which fall within the ranges that are specifically reserved for private networks:

    10.0.0.0/8\n172.16.0.0/12\n192.168.0.0/16\n

    In the example below, we configure a network containing addresses 192.168.0.1 to 192.168.0.254 using CIDR 192.168.0.0/24 (an equivalent scripted setup is sketched after this list). Technically, your private network will still work if you choose any IP outside these ranges, but this causes problems with connecting to IPs in the outside world - so don't do it!

    IP Version: Select IPv4 or IPv6.

    Gateway IP: Specify an IP address for a specific gateway. This parameter is optional.

    Disable Gateway: Select this check box to disable a gateway IP address.

  • Subnet Details tab

    Enable DHCP: Select this check box to enable DHCP so that your VM instances will automatically be assigned an IP on the subnet.

    Allocation Pools: Specify IP address pools.

    DNS Name Servers: Specify a name for the DNS server. Here you may use '8.8.8.8' (you may recognize this as one of Google's public name servers).

    Host Routes: Specify the IP address of host routes.

    For now, you can leave the Allocation Pools and Host Routes boxes empty and click on \"Create\" button. But here we specify Allocation Pools of 192.168.0.2,192.168.0.254.

    The Network Topology should now show your virtual private network next to the public network.
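
The same network and subnet can also be created with the OpenStack Python SDK. The sketch below mirrors the example values above; the names are placeholders, and it assumes your NERC OpenStack RC file has been sourced:

# Sketch: create the private network and subnet described above with the OpenStack SDK.\n# Names are placeholders; values mirror the example (CIDR 192.168.0.0/24, Google DNS).\nimport openstack\n\nconn = openstack.connect()\nnetwork = conn.network.create_network(name=\"my-private-network\")\nsubnet = conn.network.create_subnet(\n    name=\"my-private-subnet\",\n    network_id=network.id,\n    ip_version=4,\n    cidr=\"192.168.0.0/24\",\n    dns_nameservers=[\"8.8.8.8\"],\n    allocation_pools=[{\"start\": \"192.168.0.2\", \"end\": \"192.168.0.254\"}],\n)\nprint(subnet.gateway_ip)  # defaults to the first host address, 192.168.0.1\n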

"},{"location":"openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/","title":"Virtual Machine Image Guide","text":"

An OpenStack Compute cloud needs to have virtual machine images in order to launch an instance. A virtual machine image is a single file which contains a virtual disk that has a bootable operating system installed on it.

Very Important

The provided Windows Server 2022 R2 image is for evaluation only. This evaluation edition expires in 180 days. It is intended to help you evaluate whether the product is right for you. It is at the user's discretion to update, extend, and handle licensing issues for future usage.

How to extend activation grace period for another 180 days?

Remote desktop to your running Windows VM. Using the search function in your taskbar, look up Command Prompt. When you see it in the results, right-click on it and choose Run as Administrator. Your VM's current activation grace period can be reset by running: slmgr -rearm. Once this command is run successfully, restart your instance for the changes to take effect. This command typically resets the activation timer to 180 days and can be performed only a limited number of times. For more about this read here.

"},{"location":"openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/#existing-microsoft-windows-image","title":"Existing Microsoft Windows Image","text":"

Cloudbase Solutions provides Microsoft Windows Server 2022 R2 Standard Evaluation for OpenStack. This includes the required support for hypervisor-specific drivers (Hyper-V / KVM). Also integrated are the guest initialization tools (Cloudbase-Init), security updates, proper performance, and security configurations as well as the final Sysprep.

"},{"location":"openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/#how-to-build-and-upload-your-custom-microsoft-windows-image","title":"How to Build and Upload your custom Microsoft Windows Image","text":"

Overall Process

To create a new image, you will need the installation CD or DVD ISO file for the guest operating system. You will also need access to a virtualization tool. You can use KVM hypervisor for this. Or, if you have a GUI desktop virtualization tool (such as, virt-manager, VMware Fusion or VirtualBox), you can use that instead. Convert the file to QCOW2 (KVM, Xen) once you are done.

You can customize and build the new image manually on your own system and then upload the image to the NERC's OpenStack Compute cloud. Please follow the steps below, which describe how to obtain, create, and modify virtual machine images that are compatible with the NERC's OpenStack.

"},{"location":"openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/#1-prerequisite","title":"1. Prerequisite","text":"

Follow these steps to prepare the installation

a. Download a Windows Server 2022 installation ISO file. Evaluation images are available on the Microsoft website (registration required).

b. Download the signed VirtIO drivers ISO file from the Fedora website.

c. Install Virtual Machine Manager on your local Windows 10 machine using WSL:

  • Enable WSL on your local Windows 10 subsystem for Linux:

    The steps given here are straightforward, however, before following them make sure on Windows 10, you have WSL enabled and have at least Ubuntu 20.04 or above LTS version running over it. If you don\u2019t know how to do that then see our tutorial on how to enable WSL and install Ubuntu over it.

  • Download and install MobaXterm:

    MobaXterm is a free application that can be downloaded using this link. After downloading, install it like any other normal Windows software.

  • Open MobaXterm and run WSL Linux:

    As you open this advanced terminal for Windows 10, the WSL-installed Ubuntu app will show on its left side panel. Double click on that to start the WSL session.

  • Install Virt-Manager:

    sudo apt update\nsudo apt install virt-manager\n
  • Run Virtual Machine Manager:

    Start the Virtual Machine Manager by running the command virt-manager in the opened terminal, as shown below:

    This will open Virt-Manager as following:

  • Connect QEMU/KVM user session on Virt-Manager:

"},{"location":"openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/#2-create-a-virtual-machine","title":"2. Create a virtual machine","text":"

Create a virtual machine with the storage set to a 15 GB qcow2 disk image using Virtual Machine Manager

Please set 15 GB disk image size as shown below:

Set the virtual machine name and also make sure \"Customize configuration before install\" is selected as shown below:

"},{"location":"openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/#3-customize-the-virtual-machine","title":"3. Customize the Virtual machine","text":"

Enable the VirtIO driver. By default, the Windows installer does not detect the disk.

Click Add Hardware > select CDROM device and attach to downloaded virtio-win-* ISO file:

Make sure the NIC is using the virtio Device model as shown below:

Make sure to set the proper order of Boot Options as shown below, so that the CDROM with the Windows ISO is first, and Apply the order change. After this, please begin the Windows installation by clicking on the \"Begin Installation\" button.

Click \"Apply\" button.

"},{"location":"openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/#4-continue-with-the-windows-installation","title":"4. Continue with the Windows installation","text":"

You need to continue with the Windows installation process.

When prompted you can choose the \"Windows Server 2022 Standard Evaluation (Desktop Experience)\" option as shown below:

Load VirtIO SCSI drivers and network drivers by choosing an installation target when prompted. Click Load driver and browse the file system.

Select the E:\\virtio-win-*\\viostor\\2k22\\amd64 folder. When converting an image file with Windows, ensure the virtio driver is installed. Otherwise, you will get a blue screen when launching the image due to lack of the virtio driver.

The Windows installer displays a list of drivers to install. Select the VirtIO SCSI drivers.

Click Load driver again and browse the file system, and select the E:\\NetKVM\\2k22\\amd64 folder.

Select the network drivers, and continue the installation.

"},{"location":"openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/#5-restart-the-installed-virtual-machine-vm","title":"5. Restart the installed virtual machine (VM)","text":"

Once the installation is completed, the VM restarts

Define a password for the Administrator when prompted and click on the \"Finish\" button:

Send the \"Ctrl+Alt+Delete\" key using Send Key Menu, this will unlock the windows and then prompt login for the Administrator - please login using the password you set on previous step:

"},{"location":"openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/#6-go-to-device-manager-and-install-all-unrecognized-devices","title":"6. Go to device manager and install all unrecognized devices","text":"

Similarly, as shown above, repeat the process and install all missing drivers.

"},{"location":"openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/#7-enable-remote-desktop-protocol-rdp-login","title":"7. Enable Remote Desktop Protocol (RDP) login","text":"

Explicitly enable RDP login and uncheck the \"Require computers to use Network Level Authentication to connect\" option

"},{"location":"openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/#8-delete-the-recovery-parition","title":"8. Delete the recovery parition","text":"

Delete the recovery partition, which will allow expanding the image as required, by running the following commands in Command Prompt (Run as Administrator)

diskpart\nselect disk 0\nlist partition\nselect partition 3\ndelete partition override\nlist partition\n

and then extend C: drive to take up the remaining space using \"Disk Management\".

"},{"location":"openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/#9-install-any-new-windows-updates-optional","title":"9. Install any new Windows updates. (Optional)","text":""},{"location":"openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/#10-setup-cloudbase-init-to-generate-qcow2-image","title":"10. Setup cloudbase-init to generate QCOW2 image","text":"

Download and install the stable version of cloudbase-init (a Windows project providing guest initialization features, similar to cloud-init) by browsing to the Download Page in the web browser on the virtual machine running Windows. You can skip registering and just click on \"No. just show me the downloads\" to navigate to the download page as shown below:

During Installation, set Serial port for logging to COM1 as shown below:

When the installation is done, in the Complete the Cloudbase-Init Setup Wizard window, select the Run Sysprep and Shutdown check boxes and click \"Finish\" as shown below:

Wait for the machine to shutdown.

"},{"location":"openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/#11-where-is-the-newly-generated-qcow2-image","title":"11. Where is the newly generated QCOW2 image?","text":"

The Sysprep will generate the QCOW2 image, i.e. win2k22.qcow2, in /home/<YourUserName>/.local/share/libvirt/images/

"},{"location":"openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/#12-create-openstack-image-and-push-to-nercs-image-list","title":"12. Create OpenStack image and push to NERC's image list","text":"

You can copy/download this Windows image to the folder where you configured your OpenStack CLI as described Here, and upload it to the NERC's OpenStack by running the following OpenStack Image API command:

openstack image create --disk-format qcow2 --file win2k22.qcow2 MS-Windows-2022\n

You can verify the uploaded image is available by running:

openstack image list\n\n+--------------------------------------+---------------------+--------+\n| ID                                   | Name                | Status |\n+--------------------------------------+---------------------+--------+\n| a9b48e65-0cf9-413a-8215-81439cd63966 | MS-Windows-2022     | active |\n| ...                                  | ...                 | ...    |\n+--------------------------------------+---------------------+--------+\n
"},{"location":"openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/#13-launch-an-instance-using-newly-uploaded-ms-windows-2022-image","title":"13. Launch an instance using newly uploaded MS-Windows-2022 image","text":"

Login to the NERC's OpenStack and verify that the uploaded MS-Windows-2022 image is also available in the NERC's OpenStack Images List for your project, as shown below:

Create a Volume using that Windows Image:

Once the Volume is successfully created, we can use it to launch an instance as shown below:

Add other information and set up a Security Group that allows RDP (port: 3389) as shown below:

After some time the instance will be Active in Running state as shown below:

Attach a Floating IP to your instance:

More About Floating IP

If you don't have any available floating IPs, please refer to this documentation on how to allocate a new Floating IP to your project.

Click on the detail view of the Instance, then click on the Console tab menu, and click on the \"Send CtrlAltDel\" button located on the top right side of the console, as shown below:

"},{"location":"openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/#14-how-to-have-remote-desktop-login-to-your-windows-instance","title":"14. How to have Remote Desktop login to your Windows instance","text":"

Remote Desktop login should work with the Floating IP associated with the instance:

For more detailed information about OpenStack's image management, the OpenStack image creation guide provides further references and details.

"},{"location":"openstack/advanced-openstack-topics/terraform/terraform-on-NERC/","title":"Provisioning the NERC resources using Terraform","text":"

Terraform is an open-source Infrastructure as Code (IaC) software tool that works with NERC and allows you to orchestrate, provision, and manage infrastructure resources quickly and easily. Terraform codifies cloud application programming interfaces (APIs) into human-readable, declarative configuration (*.tf) files. These files are used to manage underlying infrastructure rather than through NERC's web-based graphical interface - Horizon. Terraform allows you to build, change, and manage your infrastructure in a safe, consistent, and repeatable way by defining resource configurations that you can version, reuse, and share. Terraform\u2019s main job is to create, modify, and destroy compute instances, private networks and other NERC resources.

"},{"location":"openstack/advanced-openstack-topics/terraform/terraform-on-NERC/#benefits-of-terraform","title":"Benefits of Terraform","text":"

If you are managing multiple instances/VMs for your work or research, it can be simpler and more reproducible to do so with an automation tool like Terraform.

"},{"location":"openstack/advanced-openstack-topics/terraform/terraform-on-NERC/#installing-terraform","title":"Installing Terraform","text":"

To use Terraform you will need to install it from here.

"},{"location":"openstack/advanced-openstack-topics/terraform/terraform-on-NERC/#basic-template-to-use-terraform-on-your-nerc-project","title":"Basic Template to use Terraform on your NERC Project","text":"

You can clone the repo with git clone https://github.com/nerc-project/terraform-nerc.git and run our base Terraform template to provision some basic NERC OpenStack resources using this terraform-nerc repo.

Note

The main branch of this git repo should be a good starting point in developing your own terraform code.
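
Assuming you have already sourced your project's OpenStack RC file (see the Prerequisite section below), a typical first run against the cloned repo would look roughly like this:

cd terraform-nerc\nterraform init\nterraform plan\nterraform apply\n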

"},{"location":"openstack/advanced-openstack-topics/terraform/terraform-on-NERC/#template-to-setup-r-shiny-server-using-terraform-on-your-nerc-project","title":"Template to setup R Shiny server using Terraform on your NERC Project","text":"

You can clone the repo with git clone https://github.com/nerc-project/terraform-nerc-r-shiny.git and run this template locally using Terraform to provision an R Shiny server on NERC's OpenStack resources using this terraform-nerc-r-shiny repo.

Important Note

Please make sure to review the bash script file, i.e. install-R-Shiny.sh, located in this repo, which is referenced by the user-data-path variable in example.tfvars. This repo includes the script required to set up the R Shiny server. You can apply the same concept to any other project that needs custom user-defined scripts while launching an instance. If you want to change or update this script, just edit this file and then run the terraform plan and terraform apply commands pointing at this example.tfvars file, as sketched below.
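
For example, after editing install-R-Shiny.sh or any values in example.tfvars, the change can be rolled out by re-running:

terraform plan -var-file=example.tfvars\nterraform apply -var-file=example.tfvars\n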

"},{"location":"openstack/advanced-openstack-topics/terraform/terraform-on-NERC/#how-terraform-works","title":"How Terraform Works","text":"

Terraform reads configuration files and provides an execution plan of changes, which can be reviewed for safety and then applied and provisioned. Terraform reads all files with the extension .tf in your current directory. Resources can be in a single file, or organised across several different files.

The basic Terraform deployment workflow is:

i. Scope - Identify the infrastructure for your project.

ii. Author - Write the configuration for your infrastructure in which you declare the elements of your infrastructure that you want to create.

The format of a resource definition is straightforward and looks like this (a concrete, illustrative example follows the workflow steps below):

resource \"type_of_resource\" \"resource_name\" {\n    attribute = \"attribute value\"\n    ...\n}\n

iii. Initialize - Install the plugins Terraform needs to manage the infrastructure.

iv. Plan - Preview the changes Terraform will make to match your configuration.

v. Apply - Make the planned changes.
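
As a concrete illustration of the resource format above, a resource definition for a NERC OpenStack instance written against the Terraform OpenStack provider might look like the following sketch; the image, flavor, key pair, and security group names here are examples, so adjust them to match your own project:

resource \"openstack_compute_instance_v2\" \"my_vm\" {\n    name            = \"my-vm\"\n    image_name      = \"ubuntu-22.04-x86_64\"\n    flavor_name     = \"cpu-su.1\"\n    key_pair        = \"my-key\"\n    security_groups = [\"default\"]\n}\n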

"},{"location":"openstack/advanced-openstack-topics/terraform/terraform-on-NERC/#running-terraform","title":"Running Terraform","text":"

The Terraform deployment workflow on the NERC looks like this:

"},{"location":"openstack/advanced-openstack-topics/terraform/terraform-on-NERC/#prerequisite","title":"Prerequisite","text":"
  1. You can download the \"NERC's OpenStack RC File\" with the credentials for your NERC project from the NERC's OpenStack dashboard. Then you need to source that RC file using: source *-openrc.sh. You can read how to do this here.

  2. Set up an SSH key pair by running ssh-keygen -t rsa -f username-keypair and then make sure the newly generated SSH key pair exists in your ~/.ssh folder.

"},{"location":"openstack/advanced-openstack-topics/terraform/terraform-on-NERC/#terraform-init","title":"Terraform Init","text":"

The first command that should be run after writing a new Terraform configuration or cloning an existing one is terraform init. This command is used to initialize a working directory containing Terraform configuration files and install the plugins.

Information

You will need to run terraform init if you make any changes to providers.

"},{"location":"openstack/advanced-openstack-topics/terraform/terraform-on-NERC/#terraform-plan","title":"Terraform Plan","text":"

The terraform plan command creates an execution plan, which lets you preview the changes that Terraform plans to make to your infrastructure based on your configuration files.

"},{"location":"openstack/advanced-openstack-topics/terraform/terraform-on-NERC/#terraform-apply","title":"Terraform Apply","text":"

When you use terraform apply without passing it a saved plan file, it incorporates the terraform plan command functionality and so the planning options are also available while running this command.

"},{"location":"openstack/advanced-openstack-topics/terraform/terraform-on-NERC/#input-variables-on-the-command-line","title":"Input Variables on the Command Line","text":"

You can use the -var 'NAME=VALUE' command line option to specify values for input variables declared in your root module, e.g. terraform plan -var 'name=value'

In most cases, it will be more convenient to set values for potentially many input variables declared in the root module of the configuration using definitions from a \"tfvars\" file, passed in with the -var-file=FILENAME option, e.g. terraform plan -var-file=FILENAME
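
As a minimal illustration, a \"tfvars\" file is just a list of variable assignments; the variable names below are hypothetical and must match the variables declared in your root module:

# example.tfvars\nflavor_name = \"cpu-su.1\"\nkey_name    = \"my-key\"\nimage_name  = \"ubuntu-22.04-x86_64\"\n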

"},{"location":"openstack/advanced-openstack-topics/terraform/terraform-on-NERC/#track-your-infrastructure-and-collaborate","title":"Track your infrastructure and Collaborate","text":"

Terraform keeps track of your real infrastructure in a state file, which acts as a source of truth for your environment. Terraform uses the state file to determine the changes to make to your infrastructure so that it will match your configuration. Terraform's state allows you to track resource changes throughout your deployments. You can securely share your state with your teammates, provide a stable environment for Terraform to run in, and prevent race conditions when multiple people make configuration changes at once.

"},{"location":"openstack/advanced-openstack-topics/terraform/terraform-on-NERC/#some-useful-terraform-commands","title":"Some useful Terraform commands","text":"
terraform init\n\nterraform fmt\n\nterraform validate\n\nterraform plan\n\nterraform apply\n\nterraform show\n\nterraform destroy\n\nterraform output\n
"},{"location":"openstack/backup/backup-with-snapshots/","title":"Backup with snapshots","text":"

When you start a new instance, you can choose the Instance Boot Source from the following list:

  • boot from image

  • boot from instance snapshot

  • boot from volume

  • boot from volume snapshot

In its default configuration, when the instance is launched from an Image or an Instance Snapshot, the choice for utilizing persistent storage is configured by selecting the Yes option for \"Create New Volume\". Additionally, the \"Delete Volume on Instance Delete\" setting is pre-set to No, as indicated here:

Very Important: How do you make your VM setup and data persistent?

For more in-depth information on making your VM setup and data persistent, you can explore the details here.

"},{"location":"openstack/backup/backup-with-snapshots/#create-and-use-instance-snapshots","title":"Create and use Instance snapshots","text":"

The OpenStack snapshot mechanism allows you to create new images from your instances while they are either running or stopped. An instance snapshot captures the current state of a running VM along with its storage, configuration, and memory. It includes the VM's disk image, memory state, and any configuration settings. This is useful for preserving the entire state of a VM, including its running processes and in-memory data.

This mainly serves two purposes:

  • As a backup mechanism: save the main disk of your instance to an image in Horizon dashboard under Project -> Compute -> Images and later boot a new instance from this image with the saved data.

  • As a templating mechanism: customise and upgrade a base image and save it to use as a template for new instances.

Considerations: using Instance snapshots

Instance snapshots consume more storage space because they include memory state, so make sure your Storage resource allocation is sufficient to hold them. They are suitable for scenarios where maintaining the exact VM state is crucial. The creation time of an instance snapshot will be proportional to the size of the VM state.

"},{"location":"openstack/backup/backup-with-snapshots/#how-to-create-an-instance-snapshot","title":"How to create an instance snapshot","text":""},{"location":"openstack/backup/backup-with-snapshots/#using-the-cli","title":"Using the CLI","text":"

Prerequisites:

To run the OpenStack CLI commands, you need to have:

  • OpenStack CLI setup, see OpenStack Command Line setup for more information.

To snapshot an instance to an image using the CLI, do this:

"},{"location":"openstack/backup/backup-with-snapshots/#using-the-openstack-client","title":"Using the openstack client","text":"
openstack server image create --name <name of my snapshot> --wait <instance name or uuid>\n
"},{"location":"openstack/backup/backup-with-snapshots/#to-view-newly-created-snapshot-image","title":"To view newly created snapshot image","text":"
openstack image show --fit-width <name of my snapshot>\n

Using this snapshot, the VM can be rolled back to the previous state with a server rebuild.

openstack server rebuild --image <name of my snapshot> <existing instance name or uuid>\n

For example:

openstack server image create --name my-snapshot --wait test-nerc-0\n\nopenstack image show --fit-width my-snapshot\n\nopenstack server rebuild --image my-snapshot test-nerc-0\n

Important Information

During the time it takes to do the snapshot, the machine can become unresponsive.

"},{"location":"openstack/backup/backup-with-snapshots/#using-horizon-dashboard","title":"Using Horizon dashboard","text":"

Once you're logged in to NERC's Horizon dashboard, you can create a snapshot via the \"Compute -> Instances\" page by clicking on the \"Create snapshot\" action button on the desired instance as shown below:

Live snapshots and data consistency

We call a snapshot taken against a running instance with no downtime a \"live snapshot\". These snapshots are simply disk-only snapshots, and may be inconsistent if the instance's OS is not aware of the snapshot being taken. This is why we highly recommend, if possible, to Shut Off the instance before creating snapshots.
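
Where a short downtime is acceptable, one way to get a consistent snapshot from the CLI is to stop the instance, snapshot it, and start it again; the instance and snapshot names below are examples:

openstack server stop test-nerc-0\nopenstack server image create --name my-snapshot --wait test-nerc-0\nopenstack server start test-nerc-0\n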

"},{"location":"openstack/backup/backup-with-snapshots/#how-to-restore-from-instance-snapshot","title":"How to restore from Instance snapshot","text":"

Once created, you can find the image listed under Images in the Horizon dashboard.

Navigate to Project -> Compute -> Images.

You have the option to launch this image as a new instance, or by clicking on the arrow next to Launch, create a volume from the image, edit details about the image, update the image metadata, or delete it:

You can then select the snapshot when creating a new instance or directly click \"Launch\" button to use the snapshot image to launch a new instance.
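
The equivalent from the CLI is to boot a new instance from the snapshot image; the flavor, network, key pair, and instance names below are placeholders for your own values:

openstack server create --image my-snapshot --flavor cpu-su.2 --key-name my-key --network default_network my-restored-vm\n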

"},{"location":"openstack/backup/backup-with-snapshots/#take-and-use-volume-snapshots","title":"Take and use Volume Snapshots","text":""},{"location":"openstack/backup/backup-with-snapshots/#volume-snapshots","title":"Volume snapshots","text":"

You can also create snapshots of a volume, which can later be used to create other volumes or to roll back to a previous point in time. You can take a snapshot of a volume whether or not it is attached to an instance. Taking a snapshot of an available volume (one that is not attached to an instance) does not affect the data on the volume. A snapshot of a volume serves as a backup for the persistent data on the volume at a given point in time. Snapshots are the size of the actual data existing on the volume at the time the snapshot is taken. Volume snapshots are pointers in the RW history of a volume. The creation of a snapshot takes a few seconds and can be done while the volume is in use.

Warning

Taking snapshots of volumes that are in use or attached to active instances can result in data inconsistency on the volume. This is why we highly recommend, if possible, to Shut Off the instance before creating snapshots.

Once you have the snapshot, you can use it to create other volumes based on it. Creation time for these volumes may depend on the type of volume you are creating, as it may entail some data transfer. This approach is efficient for backup and recovery of specific data without the need for the complete VM state, and it consumes less storage space compared to instance snapshots.

"},{"location":"openstack/backup/backup-with-snapshots/#how-to-create-a-volume-snapshot","title":"How to create a volume snapshot","text":""},{"location":"openstack/backup/backup-with-snapshots/#using-the-openstack-cli","title":"Using the OpenStack CLI","text":"

Prerequisites:

To run the OpenStack CLI commands, you need to have:

  • OpenStack CLI setup, see OpenStack Command Line setup for more information.

To snapshot a volume using the CLI, do this:

"},{"location":"openstack/backup/backup-with-snapshots/#using-the-openstack-client-commands","title":"Using the openstack client commands","text":"
openstack volume snapshot create --volume <volume name or uuid> <name of my snapshot>\n

For example:

openstack volume snapshot create --volume test_volume my-volume-snapshot\n+-------------+--------------------------------------+\n| Field       | Value                                |\n+-------------+--------------------------------------+\n| created_at  | 2022-04-12T19:48:42.707250           |\n| description | None                                 |\n| id          | f1cf6846-4aba-4eb8-b3e4-2ff309f8f599 |\n| name        | my-volume-snapshot                   |\n| properties  |                                      |\n| size        | 25                                   |\n| status      | creating                             |\n| updated_at  | None                                 |\n| volume_id   | f2630d21-f8f5-4f02-adc7-14a3aa72cc9d |\n+-------------+--------------------------------------+\n

Important Information

If the volume is in use, you may need to specify --force.

You can list the volume snapshots with the following command.

openstack volume snapshot list\n

For example:

openstack volume snapshot list\n+--------------------------------------+--------------------+-------------+-----------+------+\n| ID                                   | Name               | Description | Status    | Size |\n+--------------------------------------+--------------------+-------------+-----------+------+\n| f1cf6846-4aba-4eb8-b3e4-2ff309f8f599 | my-volume-snapshot | None        | available |   25 |\n+--------------------------------------+--------------------+-------------+-----------+------+\n

Once the volume snapshot is in the available state, you can create other volumes based on that snapshot. You don't need to specify the size of the volume; it will use the size of the snapshot.

openstack volume create --snapshot <name of my snapshot> --description \"Volume from a snapshot\" <new volume name>\n

You can delete a snapshot by issuing the following command:

openstack volume snapshot delete <name of my snapshot>\n

For example:

openstack volume snapshot delete my-volume-snapshot\n
"},{"location":"openstack/backup/backup-with-snapshots/#using-nercs-horizon-dashboard","title":"Using NERC's Horizon dashboard","text":"

Once you're logged in to NERC's Horizon dashboard, you can create a snapshot via the \"Volumes\" menu by clicking on the \"Create Snapshot\" action button on desired volume as shown below:

In the dialog box that opens, enter a snapshot name and a brief description.

"},{"location":"openstack/backup/backup-with-snapshots/#how-to-restore-from-volume-snapshot","title":"How to restore from Volume snapshot","text":"

Once a snapshot is created and is in \"Available\" status, you can view and manage it under the Volumes menu in the Horizon dashboard under Volume Snapshots.

Navigate to Project -> Volumes -> Snapshots.

You have the option to directly launch this snapshot as an instance by clicking on the arrow next to \"Create Volume\" and selecting \"Launch as Instance\".

It also has other options, i.e. to create a volume from the snapshot, edit details about the snapshot, delete it, or update the snapshot metadata.

Here, we will first Create Volume from Snapshot by clicking \"Create Volume\" button as shown below:

In the dialog box that opens, enter a volume name and a brief description.

Any snapshots made into volumes can be found under Volumes:

Navigate to Project -> Volumes -> Volumes.

Then using this newly created volume, you can launch it as an instance by clicking on the arrow next to \"Edit Volume\" and selecting \"Launch as Instance\" as shown below:

Very Important: Requested/Approved Allocated Storage Quota and Cost

Please remember that any volumes and snapshots stored will consume your Storage quotas, which represent the storage space allocated to your project. For NERC (OpenStack) Resource Allocations, storage quotas are specified by the \"OpenStack Volume Quota (GiB)\" and \"OpenStack Swift Quota (GiB)\" allocation attributes. You can delete any volumes and snapshots that are no longer needed to free up space. However, even if you delete volumes and snapshots, you will still be billed based on your approved and reserved storage allocation, which reserves storage from the total NESE storage pool.

If you request additional storage by specifying a changed quota value for the \"OpenStack Volume Quota (GiB)\" and \"OpenStack Swift Quota (GiB)\" allocation attributes through NERC's ColdFront interface, invoicing for the extra storage will take place upon fulfillment or approval of your request, as explained in our Billing FAQs.

Conversely, if you request a reduction in the Storage quotas, specified by the \"OpenStack Volume Quota (GiB)\" and \"OpenStack Swift Quota (GiB)\", through a change request using ColdFront, your invoicing will be adjusted accordingly when the request is submitted.

In both scenarios, 'invoicing' refers to the accumulation of hours corresponding to the added or removed storage quantity.

Help Regarding Billing

Please send your questions or concerns regarding Storage and Cost by emailing us at help@nerc.mghpcc.org or, by submitting a new ticket at the NERC's Support Ticketing System.

"},{"location":"openstack/create-and-connect-to-the-VM/assign-a-floating-IP/","title":"Assign a Floating IP","text":"

When an instance is created in OpenStack, it is automatically assigned a fixed IP address in the network to which the instance is assigned. This IP address is permanently associated with the instance until the instance is terminated.

However, in addition to the fixed IP address, a Floating IP address can also be attached to an instance. Unlike fixed IP addresses, Floating IP addresses can have their associations modified at any time, regardless of the state of the instances involved. Floating IPs are a limited resource, so your project will have a quota based on its needs. You should only assign public IPs to VMs that need them. This procedure details the reservation of a Floating IP address from an existing pool of addresses and the association of that address with a specific instance.

By attaching a Floating IP to your instance, you can SSH into your VM from your local machine.

Make sure you are using key forwarding as described in Create a Key Pair.

"},{"location":"openstack/create-and-connect-to-the-VM/assign-a-floating-IP/#allocate-a-floating-ip","title":"Allocate a Floating IP","text":"

Navigate to Project -> Compute -> Instances.

Next to Instance Name -> Click Actions dropdown arrow (far right) -> Choose Associate Floating IP

If you have some floating IPs already allocated to your project which are not yet associated with a VM, they will be available in the dropdown list on this screen.

If you have no floating IPs allocated, or all your allocated IPs are in use already, the dropdown list will be empty.

Click the \"+\" icon to allocate an IP. You will see the following screen.

Make sure 'provider' appears in the dropdown menu, and that you have not already met your quota of allocated IPs.

In this example, the project has a quota of 50 Floating IPs, and we have allocated 5 so far, so we can still allocate up to 45 more Floating IPs.

Click \"Allocate IP\".

You will get a green \"success\" popup in the top right corner that shows your public IP address, and the new IP is then listed as an option to choose from the \"IP Address\" dropdown list.

You will be able to select between multiple Floating IPs under \"IP Address\" dropdown and any unassociated VMs from \"Port to be associated\" dropdown options:

Now click on \"Associate\" button.

Then, a green \"success\" popup appears in the top left, and you can see the Floating IP attached to your VM on the Instances page:

Floating IP Quota Exceed

If you have already exceeded your quota, you will get a red error message saying \"You are already using all of your available floating IPs\" as shown below:

NOTE: By default, each approved project is provided with only 2 OpenStack Floating IPs, regardless of the units requested in the quota, as described here. Your PI or Project Manager(s) can adjust the quota and request additional Floating IPs as needed, following this documentation. This is controlled by the \"OpenStack Floating IP Quota\" attribute.
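
If you prefer the command line, a roughly equivalent flow is to allocate a Floating IP from the \"provider\" pool and attach it to your instance; the instance name here is a placeholder:

openstack floating ip create provider\nopenstack server add floating ip my-vm <floating ip address>\n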

"},{"location":"openstack/create-and-connect-to-the-VM/assign-a-floating-IP/#disassociate-a-floating-ip","title":"Disassociate a Floating IP","text":"

You may need to disassociate a Floating IP from an instance which no longer needs it, so you can assign it to one that does.

Navigate to Project -> Compute -> Instances.

Find the instance you want to remove the IP from in the list. Click the red \"Disassociate Floating IP\" to the right.

This IP will be disassociated from the instance, but it will still remain allocated to your project.

"},{"location":"openstack/create-and-connect-to-the-VM/assign-a-floating-IP/#release-a-floating-ip","title":"Release a Floating IP","text":"

You may discover that your project does not need all the floating IPs that are allocated to it.

We can release a Floating IP while disassociating it; we just need to check the \"Release Floating IP\" option as shown here:

OR,

Navigate to Project -> Network -> Floating IPs.

To release the Floating IP address back into the Floating IP pool, click the Release Floating IP option in the Actions column.

Pro Tip

You can also choose multiple Floating IPs and release them all at once.
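
The same disassociate and release actions are also available from the CLI; for example, using the Floating IP and an instance name as placeholders:

openstack server remove floating ip my-vm 199.94.60.220\nopenstack floating ip delete 199.94.60.220\n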

"},{"location":"openstack/create-and-connect-to-the-VM/create-a-Windows-VM/","title":"Create a Windows virtual machine","text":""},{"location":"openstack/create-and-connect-to-the-VM/create-a-Windows-VM/#launch-an-instance-using-a-boot-volume","title":"Launch an Instance using a boot volume","text":"

In this example, we will illustrate how to utilize a boot volume to launch a Windows virtual machine; similar steps can be used for other types of virtual machines. The following steps show how to create a virtual machine which boots from an external volume:

  • Create a volume with source data from the image

  • Launch a VM with that volume as the system disk

Recommendations

  • The recommended method to create a Windows desktop virtual machine is boot from volume, although you can also launch a Windows-based instance following the normal process using boot from image as described here.

  • To ensure smooth upgrade and maintenance of the system, select at least 100 GiB for the size of the volume.

  • Make sure your project has sufficient storage quotas.

"},{"location":"openstack/create-and-connect-to-the-VM/create-a-Windows-VM/#create-a-volume-from-image","title":"Create a volume from image","text":""},{"location":"openstack/create-and-connect-to-the-VM/create-a-Windows-VM/#1-using-nercs-horizon-dashboard","title":"1. Using NERC's Horizon dashboard","text":"

Navigate: Project -> Compute -> Images.

Make sure you are able to see that MS-Windows-2022 is available in the Images List for your project as shown below:

Create a Volume using that Windows Image:

To ensure smooth upgrade and maintenance of the system, select at least 100 GiB for the size of the volume as shown below:

"},{"location":"openstack/create-and-connect-to-the-VM/create-a-Windows-VM/#2-using-the-openstack-cli","title":"2. Using the OpenStack CLI","text":"

Prerequisites:

To run the OpenStack CLI commands, you need to have:

  • OpenStack CLI setup, see OpenStack Command Line setup for more information.

To create a volume from image using the CLI, do this:

"},{"location":"openstack/create-and-connect-to-the-VM/create-a-Windows-VM/#using-the-openstack-client-commands","title":"Using the openstack client commands","text":"

Identify the image for the initial volume contents from openstack image list.

openstack image list\n+--------------------------------------+---------------------+--------+\n| ID                                   | Name                | Status |\n+--------------------------------------+---------------------+--------+\n| a9b48e65-0cf9-413a-8215-81439cd63966 | MS-Windows-2022     | active |\n...\n+--------------------------------------+---------------------+--------+\n

In the example above, this is image id a9b48e65-0cf9-413a-8215-81439cd63966 for MS-Windows-2022.

Create a volume from this image with a size of 100 GiB named \"my-volume\" as follows:

openstack volume create --image a9b48e65-0cf9-413a-8215-81439cd63966 --size 100 --description \"Using MS Windows Image\" my-volume\n+---------------------+--------------------------------------+\n| Field               | Value                                |\n+---------------------+--------------------------------------+\n| attachments         | []                                   |\n| availability_zone   | nova                                 |\n| bootable            | false                                |\n| consistencygroup_id | None                                 |\n| created_at          | 2024-02-03T23:38:50.000000           |\n| description         | Using MS Windows Image               |\n| encrypted           | False                                |\n| id                  | d8a5da4c-41c8-4c2d-b57a-8b6678ce4936 |\n| multiattach         | False                                |\n| name                | my-volume                            |\n| properties          |                                      |\n| replication_status  | None                                 |\n| size                | 100                                  |\n| snapshot_id         | None                                 |\n| source_volid        | None                                 |\n| status              | creating                             |\n| type                | tripleo                              |\n| updated_at          | None                                 |\n| user_id             | 938eb8bfc72e4cb3ad2b94e2eb4059f7     |\n+---------------------+--------------------------------------+\n

Checking the status again using openstack volume show my-volume will allow the volume creation to be followed.
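
For example, you can watch just the relevant fields while the volume is being prepared:

openstack volume show my-volume -c status -c bootable\n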

\"downloading\" means that the volume contents is being transferred from the image service to the volume service

\"available\" means the volume can now be used for booting. A set of volume_image meta data is also copied from the image service.

"},{"location":"openstack/create-and-connect-to-the-VM/create-a-Windows-VM/#launch-instance-from-existing-bootable-volume","title":"Launch instance from existing bootable volume","text":""},{"location":"openstack/create-and-connect-to-the-VM/create-a-Windows-VM/#1-using-horizon-dashboard","title":"1. Using Horizon dashboard","text":"

Navigate: Project -> Volumes -> Volumes.

Once the Volume is successfully created, we can use it to launch an instance as shown below:

How do you make your VM setup and data persistent?

Only one instance at a time can be booted from a given volume. Make sure \"Delete Volume on Instance Delete\" is selected as No if you want the volume to persist even after the instance is terminated, which is the default setting, as shown below:

NOTE: For more in-depth information on making your VM setup and data persistent, you can explore the details here.

Add other information and set up a Security Group that allows RDP (port: 3389) as shown below:

Very Important: Setting Administrator Credentials to Log into Your VM.

To access this Windows VM, you must log in using Remote Desktop, as described here. To configure a password for the \"Administrator\" user account, proceed to the \"Configuration\" section and enter the supplied PowerShell-based Customized Script. Make sure to substitute <Your_Own_Admin_Password> with your preferred password, which will enable Remote Desktop login to the Windows VM.

#ps1\n\nnet user Administrator <Your_Own_Admin_Password>\n

Please ensure that your script in the \"Configuration\" section resembles the following syntax:

After some time the instance will become Active and in the Running state, as shown below:

Attach a Floating IP to your instance:

"},{"location":"openstack/create-and-connect-to-the-VM/create-a-Windows-VM/#2-using-the-openstack-cli-from-the-terminal","title":"2. Using the OpenStack CLI from the terminal","text":"

Prerequisites:

To run the OpenStack CLI commands, you need to have:

  • OpenStack CLI setup, see OpenStack Command Line setup for more information.

To launch an instance from existing bootable volume using the CLI, do this:

"},{"location":"openstack/create-and-connect-to-the-VM/create-a-Windows-VM/#using-the-openstack-client-commands-from-terminal","title":"Using the openstack client commands from terminal","text":"

Get the flavor name using openstack flavor list:

openstack flavor list | grep cpu-su.4\n| b3f5dded-efe3-4630-a988-2959b73eba70 | cpu-su.4      |  16384 |   20 |         0 |     4 | True      |\n

To access this Windows VM, you must log in using Remote Desktop, as described here. Before launching the VM using the OpenStack CLI, we'll prepare a PowerShell-based Customized Script as \"user-data\".

What is a user data file?

A user data file is a text file that you can include when running the openstack server create command. This file is used to customize your instance during boot.

You can place user data in a local file and pass it through the --user-data <user-data-file> parameter at instance creation. You'll create a local file named admin_password.ps1 with the following content. Please remember to replace <Your_Own_Admin_Password> with your chosen password, which will be used to log in to the Windows VM via Remote Desktop.

#ps1\n\nnet user Administrator <Your_Own_Admin_Password>\n

Set up a Security Group named \"rdp_test\" that allows RDP (port: 3389) using the CLI with the command openstack security group create <group-name>:

openstack security group create --description 'Allows RDP' rdp_test\n\nopenstack security group rule create --protocol tcp --dst-port 3389 rdp_test\n

To create a Windows VM named \"my-vm\" using the specified parameters, including the flavor name \"cpu-su.4\", existing key pair \"my-key\", security group \"rdp_test\", user data from the file \"admin_password.ps1\" created above, and the volume with name \"my-volume\" created above, you can run the following command:

openstack server create --flavor cpu-su.4 \\\n    --key-name my-key \\\n    --security-group rdp_test \\\n    --user-data admin_password.ps1 \\\n    --volume my-volume \\\n    my-vm\n

To list all Floating IP addresses that are allocated to the current project, run:

openstack floating ip list\n\n+--------------------------------------+---------------------+------------------+------+\n| ID                                   | Floating IP Address | Fixed IP Address | Port |\n+--------------------------------------+---------------------+------------------+------+\n| 760963b2-779c-4a49-a50d-f073c1ca5b9e | 199.94.60.220       | 192.168.0.195    | None |\n+--------------------------------------+---------------------+------------------+------+\n

More About Floating IP

If the above command returns an empty list, meaning you don't have any available floating IPs, please refer to this documentation on how to allocate a new Floating IP to your project.

Attach a Floating IP to your instance:

openstack server add floating ip INSTANCE_NAME_OR_ID FLOATING_IP_ADDRESS\n

For example:

openstack server add floating ip my-vm 199.94.60.220\n
"},{"location":"openstack/create-and-connect-to-the-VM/create-a-Windows-VM/#accessing-the-graphical-console-in-the-horizon-dashboard","title":"Accessing the graphical console in the Horizon dashboard","text":"

You can access the graphical console using the browser once the VM is in status ACTIVE. It can take up to 15 minutes to reach this state.

The console is accessed by selecting the Instance Details for the machine and the 'Console' tab as shown below:

"},{"location":"openstack/create-and-connect-to-the-VM/create-a-Windows-VM/#how-to-add-remote-desktop-login-to-your-windows-instance","title":"How to add Remote Desktop login to your Windows instance","text":"

When the build and the Windows installation steps have completed, you can access the console using the Windows Remote Desktop application. Remote Desktop login should work with the Floating IP associated with the instance:

What is the user login for Windows Server 2022?

The default username is \"Administrator,\" and the password is the one you set using the user data PowerShell script during the launch.

Storage and Volume

  • System disks are the first disk based on the flavor disk space and are generally used to store the operating system created from an image when the virtual machine is booted.

  • Volumes are persistent virtualized block devices independent of any particular instance. Volumes may be attached to a single instance at a time, but may be detached or reattached to a different instance while retaining all data, much like a USB drive. The size of the volume can be selected when it is created within the storage quota limits for the particular resource allocation.

"},{"location":"openstack/create-and-connect-to-the-VM/create-a-Windows-VM/#connect-additional-disk-using-volume","title":"Connect additional disk using volume","text":"

To attach additional disk to a running Windows machine you can follow this documentation. This guide provides instructions on formatting and mounting a volume as an attached disk within a Windows virtual machine.
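
If you prefer the CLI, a volume can also be attached to a running server with the following command; both names here are placeholders:

openstack server add volume my-vm my-extra-volume\n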

"},{"location":"openstack/create-and-connect-to-the-VM/flavors/","title":"Nova flavors","text":"

In NERC OpenStack, flavors define the compute, memory, and storage capacity of nova computing instances. In other words, a flavor is an available hardware configuration for a server.

Note

Flavors are visible only while you are launching an instance and under \"Flavor\" tab as explained here.

The important fields are

Field Description RAM Memory size in MiB Disk Size of disk in GiB Ephemeral Size of a second disk. 0 means no second disk is defined and mounted. VCPUs Number of virtual cores"},{"location":"openstack/create-and-connect-to-the-VM/flavors/#comparison-between-cpu-and-gpu","title":"Comparison Between CPU and GPU","text":"

Here are the key differences between CPUs and GPUs:

CPUs GPUs Work mostly in sequence. While several cores and excellent task switching give the impression of parallelism, a CPU is fundamentally designed to run one task at a time. Are designed to work in parallel. A vast number of cores and threading managed in hardware enable GPUs to perform many simple calculations simultaneously. Are designed for task parallelism. Are designed for data parallelism. Have a small number of cores that can complete single complex tasks at very high speeds. Have a large number of cores that work in tandem to compute many simple tasks. Have access to a large amount of relatively slow RAM with low latency, optimizing them for latency (operation). Have access to a relatively small amount of very fast RAM with higher latency, optimizing them for throughput. Have a very versatile instruction set, allowing the execution of complex tasks in fewer cycles but creating overhead in others. Have a limited (but highly optimized) instruction set, allowing them to execute their designed tasks very efficiently. Task switching (as a result of running the OS) creates overhead. Task switching is not used; instead, numerous serial data streams are processed in parallel from point A to point B. Will always work for any given use case but may not provide adequate performance for some tasks. Would only be a valid choice for some use cases but would provide excellent performance in those cases.

In summary, for applications such as Machine Learning (ML), Artificial Intelligence (AI), or image processing, a GPU can provide a performance increase of 50x to 200x compared to a typical CPU performing the same tasks.

"},{"location":"openstack/create-and-connect-to-the-VM/flavors/#currently-our-setup-supports-and-offers-the-following-flavors","title":"Currently, our setup supports and offers the following flavors","text":"

NERC offers the following flavors based on our Infrastructure-as-a-Service (IaaS) - OpenStack offerings (Tiers of Service).

Pro Tip

Choose a flavor for your instance from the available Tier that suits your requirements, use-cases, and budget when launching a VM as shown here.

"},{"location":"openstack/create-and-connect-to-the-VM/flavors/#1-standard-compute-tier","title":"1. Standard Compute Tier","text":"

The standard compute flavor \"cpu-su\" is provided from Lenovo SD530 (2x Intel 8268 2.9 GHz, 48 cores, 384 GB memory) server. The base unit is 1 vCPU, 4 GB memory with default of 20 GB root disk at a rate of $0.013 / hr of wall time.
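
As a quick worked example of the rate above: a cpu-su.4 instance uses 4 SUs, so it accrues 4 x $0.013 = $0.052 per hour of wall time, or roughly $37.44 over a 30-day (720-hour) month of continuous use.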

Flavor SUs GPU vCPU RAM(GiB) Storage(GiB) Cost / hr cpu-su.1 1 0 1 4 20 $0.013 cpu-su.2 2 0 2 8 20 $0.026 cpu-su.4 4 0 4 16 20 $0.052 cpu-su.8 8 0 8 32 20 $0.104 cpu-su.16 16 0 16 64 20 $0.208"},{"location":"openstack/create-and-connect-to-the-VM/flavors/#2-memory-optimized-tier","title":"2. Memory Optimized Tier","text":"

The memory optimized flavor \"mem-su\" is provided from the same servers as \"cpu-su\" but with 8 GB of memory per core. The base unit is 1 vCPU, 8 GB memory with default of 20 GB root disk at a rate of $0.026 / hr of wall time.

Flavor SUs GPU vCPU RAM(GiB) Storage(GiB) Cost / hr mem-su.1 1 0 1 8 20 $0.026 mem-su.2 2 0 2 16 20 $0.052 mem-su.4 4 0 4 32 20 $0.104 mem-su.8 8 0 8 64 20 $0.208 mem-su.16 16 0 16 128 20 $0.416"},{"location":"openstack/create-and-connect-to-the-VM/flavors/#3-gpu-tier","title":"3. GPU Tier","text":"

NERC also supports the most demanding workloads including Artificial Intelligence (AI), Machine Learning (ML) training and Deep Learning modeling, simulation, data analytics, data visualization, distributed databases, and more. For such demanding workloads, the NERC's GPU-based distributed computing flavor is recommended, which is integrated into a specialized hardware such as GPUs that produce unprecedented performance boosts for technical computing workloads.

Guidelines for Utilizing GPU-Based Flavors in Active Resource Allocation

To effectively utilize GPU-based flavors on any NERC (OpenStack) resource allocation, the Principal Investigator (PI) or project manager(s) must submit a change request for their currently active NERC (OpenStack) resource allocation. This request should specify the number of GPUs they intend to use by setting the \"OpenStack GPU Quota\" attribute. We recommend ensuring that this count accurately reflects the current GPU usage. Additionally, they need to adjust the quota values for \"OpenStack Compute RAM Quota (MiB)\" and \"OpenStack Compute vCPU Quota\" to sufficiently accommodate the GPU flavor they wish to use when launching a VM in their OpenStack Project.

Once the change request is reviewed and approved by the NERC's admin, users will be able to select the appropriate GPU-based flavor during the flavor selection tab when launching a new VM.

There are four different options within the GPU tier, featuring the newer NVIDIA A100 SXM4, NVIDIA A100s, NVIDIA V100s, and NVIDIA K80s.

How can I get customized A100 SXM4 GPUs not listed in the current flavors?

We also provide customized A100 SXM4 GPU-based flavors, which are not publicly listed on our NVIDIA A100 SXM4 40GB GPU Tiers list. These options are exclusively available for demanding projects and are subject to availability.

To request access, please fill out this form. Our team will review your request and reach out to you to discuss further.

"},{"location":"openstack/create-and-connect-to-the-VM/flavors/#i-nvidia-a100-sxm4-40gb","title":"i. NVIDIA A100 SXM4 40GB","text":"

The \"gpu-su-a100sxm4\" flavor is provided from Lenovo SD650-N V2 (2x Intel Xeon Platinum 8358 32C 250W 2.6GHz, 128 cores, 1024 GB RAM 4x NVIDIA HGX A100 40GB) servers. The higher number of tensor cores available can significantly enhance the speed of machine learning applications. The base unit is 32 vCPU, 240 GB memory with default of 20 GB root disk at a rate of $2.078 / hr of wall time.

Flavor SUs GPU vCPU RAM(GiB) Storage(GiB) Cost / hr gpu-su-a100sxm4.1 1 1 32 240 20 $2.078 gpu-su-a100sxm4.2 2 2 64 480 20 $4.156

How to setup NVIDIA driver for \"gpu-su-a100sxm4\" flavor based VM?

After launching a VM with an NVIDIA A100 SXM4 GPU flavor, you will need to setup the NVIDIA driver in order to use GPU-based codes and libraries. Please run the following commands to setup the NVIDIA driver and CUDA version required for these flavors in order to execute GPU-based codes. NOTE: These commands are ONLY applicable for the VM based on \"ubuntu-22.04-x86_64\" image. You might need to find corresponding packages for your own OS of choice.

sudo apt update\nsudo apt -y install nvidia-driver-495\n# Just click *Enter* if any popups appear!\n# Confirm and verify that you can see the NVIDIA device attached to your VM\nlspci | grep -i nvidia\n# 00:05.0 3D controller: NVIDIA Corporation GA100 [A100 SXM4 40GB] (rev a1)\nsudo reboot\n# SSH back to your VM and then you will be able to use nvidia-smi command\nnvidia-smi\n
"},{"location":"openstack/create-and-connect-to-the-VM/flavors/#ii-nvidia-a100-40gb","title":"ii. NVIDIA A100 40GB","text":"

The \"gpu-su-a100\" flavor is provided from Lenovo SR670 (2x Intel 8268 2.9 GHz, 48 cores, 384 GB memory, 4x NVIDIA A100 40GB) servers. These latest GPUs deliver industry-leading high throughput and low latency networking. The base unit is 24 vCPU, 74 GB memory with default of 20 GB root disk at a rate of $1.803 / hr of wall time.

Flavor SUs GPU vCPU RAM(GiB) Storage(GiB) Cost / hr gpu-su-a100.1 1 1 24 74 20 $1.803 gpu-su-a100.2 2 2 48 148 20 $3.606

How to setup NVIDIA driver for \"gpu-su-a100\" flavor based VM?

After launching a VM with an NVIDIA A100 GPU flavor, you will need to setup the NVIDIA driver in order to use GPU-based codes and libraries. Please run the following commands to setup the NVIDIA driver and CUDA version required for these flavors in order to execute GPU-based codes. NOTE: These commands are ONLY applicable for the VM based on \"ubuntu-22.04-x86_64\" image. You might need to find corresponding packages for your own OS of choice.

sudo apt update\nsudo apt -y install nvidia-driver-495\n# Just click *Enter* if any popups appear!\n# Confirm and verify that you can see the NVIDIA device attached to your VM\nlspci | grep -i nvidia\n# 0:05.0 3D controller: NVIDIA Corporation GA100 [A100 PCIe 40GB] (rev a1)\nsudo reboot\n# SSH back to your VM and then you will be able to use nvidia-smi command\nnvidia-smi\n
"},{"location":"openstack/create-and-connect-to-the-VM/flavors/#iii-nvidia-v100-32gb","title":"iii. NVIDIA V100 32GB","text":"

The \"gpu-su-v100\" flavor is provided from Dell R740xd (2x Intel Xeon Gold 6148, 40 cores, 768GB memory, 1x NVIDIA V100 32GB) servers. The base unit is 48 vCPU, 192 GB memory with default of 20 GB root disk at a rate of $1.214 / hr of wall time.

Flavor SUs GPU vCPU RAM(GiB) Storage(GiB) Cost / hr gpu-su-v100.1 1 1 48 192 20 $1.214

How to setup NVIDIA driver for \"gpu-su-v100\" flavor based VM?

After launching a VM with an NVIDIA V100 GPU flavor, you will need to setup the NVIDIA driver in order to use GPU-based codes and libraries. Please run the following commands to setup the NVIDIA driver and CUDA version required for these flavors in order to execute GPU-based codes. NOTE: These commands are ONLY applicable for the VM based on \"ubuntu-22.04-x86_64\" image. You might need to find corresponding packages for your own OS of choice.

sudo apt update\nsudo apt -y install nvidia-driver-470\n# Just click *Enter* if any popups appear!\n# Confirm and verify that you can see the NVIDIA device attached to your VM\nlspci | grep -i nvidia\n# 00:05.0 3D controller: NVIDIA Corporation GV100GL [Tesla V100 PCIe 32GB] (rev a1)\nsudo reboot\n# SSH back to your VM and then you will be able to use nvidia-smi command\nnvidia-smi\n
"},{"location":"openstack/create-and-connect-to-the-VM/flavors/#iv-nvidia-k80-12gb","title":"iv. NVIDIA K80 12GB","text":"

The \"gpu-su-k80\" flavor is provided from Supermicro X10DRG-H (2x Intel E5-2620 2.40GHz, 24 cores, 128GB memory, 4x NVIDIA K80 12GB) servers. The base unit is 6 vCPU, 28.5 GB memory with default of 20 GB root disk at a rate of $0.463 / hr of wall time.

Flavor SUs GPU vCPU RAM(GiB) Storage(GiB) Cost / hr gpu-su-k80.1 1 1 6 28.5 20 $0.463 gpu-su-k80.2 2 2 12 57 20 $0.926 gpu-su-k80.4 4 4 24 114 20 $1.852

How to setup NVIDIA driver for \"gpu-su-k80\" flavor based VM?

After launching a VM with an NVIDIA K80 GPU flavor, you will need to setup the NVIDIA driver in order to use GPU-based codes and libraries. Please run the following commands to setup the NVIDIA driver and CUDA version required for these flavors in order to execute GPU-based codes. NOTE: These commands are ONLY applicable for the VM based on \"ubuntu-22.04-x86_64\" image. You might need to find corresponding packages for your own OS of choice.

sudo apt update\nsudo apt -y install nvidia-driver-470\n# Just click *Enter* if any popups appear!\n# Confirm and verify that you can see the NVIDIA device attached to your VM\nlspci | grep -i nvidia\n# 00:05.0 3D controller: NVIDIA Corporation GK210GL [Tesla K80] (rev a1)\nsudo reboot\n# SSH back to your VM and then you will be able to use nvidia-smi command\nnvidia-smi\n

NERC IaaS Storage Tiers Cost

Storage both OpenStack Swift (object storage) and Cinder (block storage/ volumes) are charged separately at a rate of $0.009 TiB/hr or $9.00E-6 GiB/hr. More about cost can be found here and some of the common billing related FAQs are listed here.
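
As a rough worked example of this rate: a 100 GiB volume kept for a 30-day (720-hour) month accrues about 100 GiB x $9.00E-6 GiB/hr x 720 hr, or approximately $0.65.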

"},{"location":"openstack/create-and-connect-to-the-VM/flavors/#how-can-i-get-customized-a100-sxm4-gpus-not-listed-in-the-current-flavors","title":"How can I get customized A100 SXM4 GPUs not listed in the current flavors?","text":"

We also provide customized A100 SXM4 GPU-based flavors, which are not publicly listed on our NVIDIA A100 SXM4 40GB GPU Tiers list. These options are exclusively available for demanding projects and are subject to availability.

To request access, please fill out this form. Our team will review your request and reach out to you to discuss further.

"},{"location":"openstack/create-and-connect-to-the-VM/flavors/#how-to-change-flavor-of-an-instance","title":"How to Change Flavor of an instance","text":""},{"location":"openstack/create-and-connect-to-the-VM/flavors/#using-horizon-dashboard","title":"Using Horizon dashboard","text":"

Once you're logged in to NERC's Horizon dashboard, you can navigate to Project -> Compute -> Instances.

You can select the instance you wish to extend or change the flavor. Here, you will see several options available under the Actions menu located on the right-hand side of your instance, as shown here:

Click \"Resize Instance\".

In the Resize Instance dialog box, select the new flavor of your choice under the \"New Flavor\" dropdown options. In this example, we are changing the current flavor \"cpu-su.1\" to the new flavor \"cpu-su.2\" for our VM, as shown below:

Once you have reviewed and verified the new flavor details, press the \"Resize\" button.

Very Important Information

You will only be able to choose flavors that are within your current available resource quotas, i.e., vCPUs and RAM.

You will see the status of the resize in the following page.

When it says \"Confirm or Revert Resize/Migrate\", log in to the instance and verify that it worked as intended (meaning the instance is working as before but with the new flavor).

If you are happy with the result, press \"Confirm Resize/Migrate\" in the drop-down to the far right (it should be pre-selected) as shown below:

This will finalise the process and make it permanent.

If you are unhappy (for some reason the process failed), you are able to instead press \"Revert resize/Migrate\" (available in the drop-down). This will revert the process.

"},{"location":"openstack/create-and-connect-to-the-VM/flavors/#using-the-cli","title":"Using the CLI","text":"

Prerequisites:

To run the OpenStack CLI commands, you need to have:

  • OpenStack CLI setup, see OpenStack Command Line setup for more information.

If you want to change the flavor that is bound to a VM, you can run the following openstack client commands; here we are changing the flavor of an existing VM named \"test-vm\" from mem-su.2 to mem-su.4:

First, stop the running VM using:

openstack server stop test-vm\n

Then, verify the status is \"SHUTOFF\" and also the used flavor is mem-su.2 as shown below:

openstack server list\n+--------------------------------------+------+---------+--------------------------------------------+--------------------------+---------+\n| ID | Name | Status | Networks | Image | Flavor |\n+--------------------------------------+------+---------+--------------------------------------------+--------------------------+---------+\n| cd51dbba-fe95-413c-9afc-71370be4d4fd | test-vm | SHUTOFF | default_network=192.168.0.58, 199.94.60.10 | N/A (booted from volume) | mem-su.2 |\n+--------------------------------------+------+---------+--------------------------------------------+--------------------------+---------+\n

Then, resize the flavor from mem-su.2 to mem-su.4 by running:

openstack server resize --flavor mem-su.4 cd51dbba-fe95-413c-9afc-71370be4d4fd\n

Confirm the resize:

openstack server resize confirm cd51dbba-fe95-413c-9afc-71370be4d4fd\n

Then, start the VM:

openstack server start cd51dbba-fe95-413c-9afc-71370be4d4fd\n

Verify the VM is using the new flavor of mem-su.4 as shown below:

openstack server list\n+--------------------------------------+------+--------+--------------------------------------------+--------------------------+---------+\n| ID | Name | Status | Networks | Image | Flavor |\n+--------------------------------------+------+--------+--------------------------------------------+--------------------------+---------+\n| cd51dbba-fe95-413c-9afc-71370be4d4fd | test-vm | ACTIVE | default_network=192.168.0.58, 199.94.60.10 | N/A (booted from volume) | mem-su.4 |\n+--------------------------------------+------+--------+--------------------------------------------+--------------------------+---------+\n
"},{"location":"openstack/create-and-connect-to-the-VM/images/","title":"Images","text":"

An image is composed of a virtual collection of a kernel, operating system, and configuration.

"},{"location":"openstack/create-and-connect-to-the-VM/images/#glance","title":"Glance","text":"

Glance is the API-driven OpenStack image service that provides services and associated libraries to store, browse, register, distribute, and retrieve bootable disk images. It acts as a registry for virtual machine images, allowing users to copy server images for immediate storage. These images can be used as templates when setting up new instances.

"},{"location":"openstack/create-and-connect-to-the-VM/images/#nerc-images-list","title":"NERC Images List","text":"

Once you're logged in to NERC's Horizon dashboard.

Navigate to Project -> Compute -> Images.

NERC provides a set of default images that can be used as source while launching an instance:

ID Name a9b48e65-0cf9-413a-8215-81439cd63966 MS-Windows-2022 cfecb5d4-599c-4ffd-9baf-9cbe35424f97 almalinux-8-x86_64 263f045e-86c6-4344-b2de-aa475dbfa910 almalinux-9-x86_64 41fa5991-89d5-45ae-8268-b22224c772b2 debian-10-x86_64 99194159-fcd1-4281-b3e1-15956c275692 fedora-36-x86_64 74a33f77-fc42-4dd1-a5a2-55fb18fc50cc rocky-8-x86_64 d7d41e5f-58f4-4ba6-9280-7fef9ac49060 rocky-9-x86_64 75a40234-702b-4ab7-9d83-f436b05827c9 ubuntu-18.04-x86_64 8c87cf6f-32f9-4a4b-91a5-0d734b7c9770 ubuntu-20.04-x86_64 da314c41-19bf-486a-b8da-39ca51fd17de ubuntu-22.04-x86_64 17912292-8861-489a-b37e-bb78e15b934a ubuntu-24.04-x86_64"},{"location":"openstack/create-and-connect-to-the-VM/images/#how-to-create-and-upload-own-custom-images","title":"How to create and upload own custom images?","text":"

Besides the above-mentioned system-provided images, users can customize and upload their own images to NERC, as documented in this documentation.

Please refer to this guide to learn more about how to obtain other publicly available virtual machine images for the NERC OpenStack platform within your project space.

"},{"location":"openstack/create-and-connect-to-the-VM/launch-a-VM/","title":"How to launch an Instance","text":"

Prerequisites:

  • You followed the instruction in Create a Key Pair to set up a public ssh key.

  • Make sure you have added rules in the Security Groups to allow SSH (port 22) access to the instance (a minimal CLI sketch for this follows the list below).
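
For reference, here is a minimal sketch of creating such a security group and rule from the CLI; the group name \"ssh_only\" is just an example:

openstack security group create --description 'Allows SSH' ssh_only\nopenstack security group rule create --protocol tcp --dst-port 22 ssh_only\n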

"},{"location":"openstack/create-and-connect-to-the-VM/launch-a-VM/#using-horizon-dashboard","title":"Using Horizon dashboard","text":"

Once you're logged in to NERC's Horizon dashboard.

Navigate: Project -> Compute -> Instances.

Click on \"Launch Instance\" button:

In the Launch Instance dialog box, specify the following values:

"},{"location":"openstack/create-and-connect-to-the-VM/launch-a-VM/#details-tab","title":"Details Tab","text":"

Instance Name: Give your instance a name; this name is assigned to the virtual machine.

Important Note

The instance name you assign here becomes the initial host name of the server. If the name is longer than 63 characters, the Compute service truncates it automatically to ensure dnsmasq works correctly.

Availability Zone: By default, this value is set to the availability zone given by the cloud provider i.e. nova.

Count: To launch multiple instances, enter a value greater than 1. The default is 1.

"},{"location":"openstack/create-and-connect-to-the-VM/launch-a-VM/#source-tab","title":"Source Tab","text":"

Double-check your selection in the \"Select Boot Source\" dropdown.

When you start a new instance, you can choose the Instance Boot Source from the following list:

  • boot from image

  • boot from instance snapshot

  • boot from volume

  • boot from volume snapshot

In its default configuration, when the instance is launched from an Image or an Instance Snapshot, the choice for utilizing persistent storage is configured by selecting the Yes option for \"Create New Volume\". Additionally, the \"Delete Volume on Instance Delete\" setting is pre-set to No, as indicated here:

If you set the \"Create New Volume\" option to No, the instance will boot from either an image or a snapshot, with the instance only being attached to an ephemeral disk as described here. To mitigate potential data loss, we strongly recommend regularly taking a snapshot of such a running ephemeral instance, referred to as an \"instance snapshot\", especially if you want to safeguard or recover important states of your instance.

When deploying a non-ephemeral instance, which involves creating a new volume and selecting Yes for \"Delete Volume on Instance Delete\", deleting the instance will also remove the associated volume. Consequently, all data on that disk is permanently lost, which is undesirable when the data on attached volumes needs to persist even after the instance is deleted. Ideally, selecting \"Yes\" for this setting should be reserved for instances where persistent data storage is not required.

Very Important: How do you make your VM setup and data persistent?

For more in-depth information on making your VM setup and data persistent, you can explore the details here.

To start a VM for the first time, we will need a base image, so please make sure the \"Image\" dropdown option is selected. In this example, we chose ubuntu-22.04-x86_64; you may choose any of the available images.

Bootable Images

NERC has made several Public bootable images available to the users as listed here. Customers can also upload their own custom images, as documented in this guide.

To view them, Navigate: Project -> Compute -> Images.

How to override the flavor's Default root disk volume size

If you don't specify a custom value for \"Volume Size (GB)\", it will be set to the root disk size of your selected Flavor. For more about the default root disk size, you can refer to this documentation. We can override this value by entering our own custom value (in GiB), and that storage is available as a Volume attached to the instance to enable persistent storage.
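
If you prefer the OpenStack CLI, a roughly equivalent launch can be sketched as below. This is only a minimal sketch, assuming a recent python-openstackclient; the image, flavor, network, key pair, security group, and server names are placeholders you should replace with your own:

# Boot from an image onto a new 100 GiB root volume (overriding the flavor's default root disk size)\nopenstack server create --image ubuntu-22.04-x86_64 --flavor cpu-su.4 --boot-from-volume 100 --key-name my_key --security-group ssh_only --network default_network my-persistent-vm\n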

"},{"location":"openstack/create-and-connect-to-the-VM/launch-a-VM/#flavor-tab","title":"Flavor Tab","text":"

Specify the size of the instance to launch. Choose cpu-su.4 from the 'Flavor' tab by clicking on the \"+\" icon.

Important Note

In NERC OpenStack, flavors define the compute, memory, and storage capacity of nova computing instances. In other words, a flavor is an available hardware configuration for a server.

Some of the flavors will not be available for your use as per your resource Quota limits and will be shown as below:

NOTE: More details about available flavors can be found here, and how to request changes to the current allocation quota attributes can be found here.

After choosing cpu-su.4, you should see it moved up to \"Allocated\".

Storage and Volume

  • System disks are the first disk based on the flavor disk space and are generally used to store the operating system created from an image when the virtual machine is booted.

  • Volumes are persistent virtualized block devices independent of any particular instance. Volumes may be attached to a single instance at a time, but may be detached or reattached to a different instance while retaining all data, much like a USB drive. The size of the volume can be selected when it is created within the storage quota limits for the particular resource allocation.
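
As an illustrative sketch only (the volume and server names are placeholders), a persistent volume can also be created and attached to a running instance with the OpenStack CLI:

# Create a 20 GiB volume and attach it to an existing instance\nopenstack volume create --size 20 my-data-volume\nopenstack server add volume my-vm my-data-volume\n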

"},{"location":"openstack/create-and-connect-to-the-VM/launch-a-VM/#networks-tab","title":"Networks Tab","text":"

Make sure the Default Network that is created by default is moved up to \"Allocated\". If not, you can click on the \"+\" icon in \"Available\".

"},{"location":"openstack/create-and-connect-to-the-VM/launch-a-VM/#security-groups-tab","title":"Security Groups Tab","text":"

Make sure to add the security group where you enabled SSH. To add an SSH security group first, see here.

How to update New Security Group(s) on any running VM?

If you want to attach/detach any new Security Group(s) to/from a running VM after it has launched, first create the new Security Group(s) with all the required rules. Following this guide, you'll be able to attach the created security group(s), with all the required rules, to a running VM. You can modify the rules of any Security Group(s), but that will affect all VMs using that security group.
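
For reference, attaching or detaching a security group on a running VM can also be done with the OpenStack CLI; this is a sketch in which the server and security group names are placeholders:

# Attach an existing security group to a running instance\nopenstack server add security group my-vm my-new-secgroup\n# Detach it again if it is no longer needed\nopenstack server remove security group my-vm my-new-secgroup\n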

"},{"location":"openstack/create-and-connect-to-the-VM/launch-a-VM/#key-pair-tab","title":"Key Pair Tab","text":"

Add the key pair you created for your local machine/laptop to use with this VM. To add a Key Pair first create and add them to your Project as described here.

Important Note

If you did not provide a key pair, security groups, or rules, users can access the instance only from inside the cloud through VNC. Even pinging the instance is not possible without an ICMP rule configured. We recommend limiting access as much as possible for best security practices.

"},{"location":"openstack/create-and-connect-to-the-VM/launch-a-VM/#ignore-other-tabs","title":"Ignore other Tabs","text":"

Network Ports, Configuration, Server Groups, Scheduler Hints, and Metadata tabs: Please ignore these tabs, as they are not required and are only for advanced setup.

How to use 'Configuration' tab

If you want to specify a customization script that runs after your instance launches, you can write that custom script inside the \"Customization Script\" text area. For example:
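
A minimal illustrative customization script might look like the following (a sketch assuming an Ubuntu-based image; cloud-init typically runs such a script as root on first boot):

#!/bin/bash\n# Illustrative example: install and start a web server on first boot\napt-get update\napt-get install -y apache2\nsystemctl enable --now apache2\n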

You are now ready to launch your VM - go ahead and click \"Launch Instance\". This will initiate an instance.

On a successful launch you would be redirected to Compute -> Instances tab and can see the VM spawning.

Once your VM is successfully running you will see the Power State changes from \"No State\" to \"running\".

Note

Here we explained launching an instance using an Image, but you can also launch an instance from the \"instance snapshot\", \"volume\", or \"volume snapshot\" options using steps similar to the above. If you want to use the OpenStack CLI to launch a VM you can read this, or if you want to provision the NERC resources using Terraform you can read this.

"},{"location":"openstack/create-and-connect-to-the-VM/ssh-to-the-VM/","title":"SSH to the VM","text":"

Secure Shell, or SSH, is used for administering and managing Linux workloads. Before trying to access instances from the outside world, you need to make sure you have followed these steps:

  • You followed the instruction in Create a Key Pair to set up a public ssh key.

  • Your public ssh-key was selected (in the \"Key Pair\" tab) while launching the instance.

  • Assign a Floating IP to the instance in order to access it from the outside world (a CLI sketch for this step follows this list).

  • Make sure you have added rules in the Security Groups to allow ssh using Port 22 access to the instance.
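
If you prefer the OpenStack CLI, the Floating IP step can be sketched roughly as follows (the external network name, server name, and allocated IP are placeholders):

# Allocate a floating IP from the external network and attach it to the instance\nopenstack floating ip create <external-network>\nopenstack server add floating ip <server-name> <allocated-floating-ip>\n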

How to update New Security Group(s) on any running VM?

If you want to attach/detach any new Security Group(s) to/from a running VM after it has launched, first create the new Security Group(s) with all the required rules. Following this guide, you'll be able to attach the created security group(s), with all the required rules, to a running VM.

Make a note of the Floating IP you have associated to your instance.

In our example, the IP is 199.94.60.66.

Default usernames for all the base images are:

  • all Ubuntu images: ubuntu

  • all AlmaLinux images: almalinux

  • all Rocky Linux images: rocky

  • all Fedora images: fedora

  • all Debian images: debian

  • all RHEL images: cloud-user

Removed Centos Images

If you still have VMs running with deleted CentOS images, you need to use the following default username for your CentOS images: centos.

  • all CentOS images: centos

Our example VM was launched with the ubuntu-22.04-x86_64 base image, so the user we need is 'ubuntu'.

Open a Terminal window and type:

ssh ubuntu@199.94.60.66\n

Since you have never connected to this VM before, you will be asked if you are sure you want to connect. Type yes.

Important Note

If you haven't added your key to ssh-agent, you may need to specify the private key file, like this: ssh -i ~/.ssh/cloud.key ubuntu@199.94.60.66

To add your private key to the ssh-agent, you can follow these steps:

  1. eval \"$(ssh-agent -s)\"

    Output: Agent pid 59566

  2. ssh-add ~/.ssh/cloud.key

    If your private key is password protected, you'll be prompted to enter the passphrase.

  3. Verify that the key has been added by running ssh-add -l.

"},{"location":"openstack/create-and-connect-to-the-VM/ssh-to-the-VM/#ssh-to-the-vm-using-ssh-config","title":"SSH to the VM using SSH Config","text":"

Alternatively, you can configure the settings for the remote instances in your SSH configuration file (typically found in ~/.ssh/config). The SSH configuration file might include an entry for your newly launched VM like this:

Host ExampleHostLabel\n    HostName 199.94.60.66\n    User ubuntu\n    IdentityFile ~/.ssh/cloud.key\n

Here, the Host value can be any label you want. The HostName value is the Floating IP associated with the instance that you want to access, the User value specifies the default account username based on the base OS image used for the VM, and IdentityFile specifies the path to your Private Key on your local machine. With this configuration defined, you can connect to the account simply by using the Host value set as \"ExampleHostLabel\". You do not have to type the username, hostname, and private key each time.

So, you can SSH into your host VM by running:

ssh ExampleHostLabel\n
"},{"location":"openstack/create-and-connect-to-the-VM/ssh-to-the-VM/#setting-a-password","title":"Setting a password","text":"

When the VMs are launched, a strong, randomly-generated password is created for the default user, and then discarded.

Once you connect to your VM, you will want to set a password in case you ever need to log in via the console in the web dashboard.

This can be useful, for example, if your network connections aren't working correctly.

Setting a password is necessary to use Remote Desktop Protocol (RDP)

Remote Desktop Protocol (RDP) is widely used for Windows remote connections, but you can also access and interact with the graphical user interface of a remote Linux server by using a tool like xrdp, an open-source implementation of the RDP server. You can use xrdp to remotely access the Linux desktop; to do so, you need an RDP client. Because xrdp provides a login to the remote machine using Microsoft RDP, a user with a password is necessary to access the VM. You can refer to this guide on how to install and configure an RDP server using xrdp on an Ubuntu server and access it using an RDP client from your local machine.

Since you are not using it to log in over SSH or to sudo, it doesn't really matter how hard it is to type, and we recommend using a randomly-generated password.

Create a random password like this:

ubuntu@test-vm:~$ cat /dev/urandom | base64 | dd count=14 bs=1\nT1W16HCyfZf8V514+0 records in\n14+0 records out\n14 bytes copied, 0.00110367 s, 12.7 kB/s\n

The 'count' parameter controls the number of characters.

The first [count] characters of the output are your randomly generated output, followed immediately by \"[count]+0\", so in the above example the password is: T1W16HCyfZf8V5.

Set the password for ubuntu using the command:

ubuntu@test-vm:~$ sudo passwd ubuntu\nNew password:\nRetype new password:\n... password updated successfully\n

Store the password in a secure place. Don't send it over email, post it on your wall on a sticky note, etc.

"},{"location":"openstack/create-and-connect-to-the-VM/ssh-to-the-VM/#adding-other-peoples-ssh-keys-to-the-instance","title":"Adding other people's SSH keys to the instance","text":"

You were able to log in using your own SSH key.

Right now OpenStack only permits one key to be added at launch, so you need to add your teammates' keys manually.

Get your teammates' public keys. If they used ssh-keygen to create their key, this will be in a file ending in .pub on their machine.
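
For example, they can print their public key with a command like the following (the exact filename depends on the key type they generated):

cat ~/.ssh/id_rsa.pub   # or ~/.ssh/id_ed25519.pub, depending on the key type\n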

If they created a key via the dashboard, or imported the key created with ssh-keygen, their public key is viewable from the Key Pairs tab.

Click on the key pair name. The public key starts with 'ssh-rsa' and looks something like this:

ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDL6O5qNZHfgFwf4vnnib2XBub7ZU6khy6z6JQl3XRJg6I6gZ\n+Ss6tNjz0Xgax5My0bizORcka/TJ33S36XZfzUKGsZqyEl/ax1Xnl3MfE/rgq415wKljg4\n+QvDznF0OFqXjDIgL938N8G4mq/\ncKKtRSMdksAvNsAreO0W7GZi24G1giap4yuG4XghAXcYxDnOSzpyP2HgqgjsPdQue919IYvgH8shr\n+sPa48uC5sGU5PkTb0Pk/ef1Y5pLBQZYchyMakQvxjj7hHZaT/\nLw0wIvGpPQay84plkjR2IDNb51tiEy5x163YDtrrP7RM2LJwXm+1vI8MzYmFRrXiqUyznd\ntest_user@demo\n

Create a file called something like 'teammates.txt' and paste in your team's public keys, one per line.

Hang onto this file to save yourself from having to do all the copy/pasting every time you launch a new VM.

Copy the file to the vm:

[you@your-laptop ~]$ scp teammates.txt ubuntu@199.94.60.66:~\n

If the copy works, you will see the output:

teammates.txt                  100%    0     0KB/s   00:00\n

Append the file's contents to authorized_keys:

[ubuntu@test-vm ~]$ cat teammates.txt >> ~/.ssh/authorized_keys\n

Now your teammates should also be able to log in.

Important Note

Make sure to use >> instead of > to avoid overwriting your own key.

"},{"location":"openstack/create-and-connect-to-the-VM/ssh-to-the-VM/#adding-users-to-the-instance","title":"Adding users to the instance","text":"

You may decide that each teammate should have their own user on the VM instead of everyone logging in to the default user.

Once you log into the VM, you can create another user like this.

Note

The 'sudo_group' is different for different OS - in CentOS and Red Hat, the group is called 'wheel', while in Ubuntu, the group is called 'sudo'.

sudo su\n# useradd -m <username>\n# passwd <username>\n# usermod -aG <sudo_group> <username>    <-- skip this step for users who\n# should not have root access\n# su username\ncd ~\nmkdir .ssh\nchmod 700 .ssh\ncd .ssh\nvi authorized_keys   <-- paste the public key for that user in this file\nchmod 600 authorized_keys\n
"},{"location":"openstack/create-and-connect-to-the-VM/ssh-to-the-VM/#how-to-enable-remote-desktop-protocol-using-xrdp-on-ubuntu","title":"How To Enable Remote Desktop Protocol Using xrdp on Ubuntu","text":""},{"location":"openstack/create-and-connect-to-the-VM/ssh-to-the-VM/#log-in-to-the-server-with-sudo-access","title":"Log in to the server with Sudo access","text":"

In order to install xrdp, you need to log in to the server with sudo access.

ssh username@your_server_ip\n

For example:

ssh ubuntu@199.94.60.66\n
"},{"location":"openstack/create-and-connect-to-the-VM/ssh-to-the-VM/#installing-a-desktop-environment","title":"Installing a Desktop Environment","text":"

After connecting to your server using SSH, update the list of available packages using the following command:

sudo apt update\n

Next, install the xfce4 and xfce4-goodies packages on your server:

sudo apt install xfce4 xfce4-goodies -y\n

Select Display Manager

If prompted to choose a display manager, which manages graphical login mechanisms and user sessions, you can select any option from the list of available display managers. For instance, here we have gdm3 as the default selection.

"},{"location":"openstack/create-and-connect-to-the-VM/ssh-to-the-VM/#installing-xrdp","title":"Installing xrdp","text":"

To install xrdp, run the following command in the terminal:

sudo apt install xrdp -y\n

After installing xrdp, verify the status of xrdp using systemctl:

sudo systemctl status xrdp\n

This command will show the status as active (running):

Output:

\u25cf xrdp.service - xrdp daemon\n    Loaded: loaded (/lib/systemd/system/xrdp.service; enabled; vendor preset: enab>\n    Active: active (running) since Mon 2024-02-12 21:33:01 UTC; 9s ago\n    ...\n    CGroup: /system.slice/xrdp.service\n            \u2514\u25008839 /usr/sbin/xrdp\n

What if xrdp is not Running?

If the status of xrdp is not running, you may have to start the service manually with this command: sudo systemctl start xrdp. After executing the above command, verify the status again to ensure xrdp is in a running state.

Make xrdp use the desktop environment we previously installed:

sudo sed -i.bak '/fi/a #xrdp multiple users configuration \\n xfce-session \\n' /etc/xrdp/startwm.sh\n
"},{"location":"openstack/create-and-connect-to-the-VM/ssh-to-the-VM/#configuring-xrdp-and-updating-security-groups","title":"Configuring xrdp and Updating Security Groups","text":"

If you want to customize the default xrdp configuration (optional), you will need to review the default configuration of xrdp, which is stored under /etc/xrdp/xrdp.ini. xrdp.ini is the default configuration file to set up RDP connections to the xrdp server. The configuration file can be modified and customized to meet the RDP connection requirements.

Add a new security group with an RDP (port 3389) rule open to the public for RDP connections, and attach that security group to your instance as described here.

How to Update Security Group(s) on a Running VM?

Following this guide, you'll be able to attach created security group(s) with all the required rules to a running VM.

Restart the xrdp server to make sure all the above changes are reflected:

sudo systemctl restart xrdp\n
"},{"location":"openstack/create-and-connect-to-the-VM/ssh-to-the-VM/#testing-the-rdp-connection","title":"Testing the RDP Connection","text":"

You should now be able to connect to the Ubuntu VM via xrdp.

"},{"location":"openstack/create-and-connect-to-the-VM/ssh-to-the-VM/#testing-the-rdp-connection-on-windows","title":"Testing the RDP Connection on Windows","text":"

If you are using Windows as your local desktop, an RDP connection application is available by default on your machine.

Enter your VM's Floating IP and username into the fillable text boxes for Computer and User name.

You may need to press the down arrow for \"Show Options\" to input the username i.e. ubuntu:

Press the Connect button. If you receive an alert that the \"Remote Desktop can't connect to the remote computer\", check that you have properly attached the security group with an RDP (port 3389) rule open to the public to your VM as described here.

Press Yes if you receive the identity verification popup:

Then, enter your VM's username (ubuntu) and the password you created for the user ubuntu following these steps.

Press Ok.

Once you have logged in, you should be able to access your Ubuntu Desktop environment:

"},{"location":"openstack/create-and-connect-to-the-VM/ssh-to-the-VM/#testing-the-rdp-connection-on-macos","title":"Testing the RDP Connection on macOS","text":"

To test the connection using the Remote Desktop Connection client on macOS, first launch the Microsoft Remote Desktop Connection app.

Press Add PC, then enter your remote server's Floating IP in the PC name fillable box:

You can Add a user account when setting up the connection:

Once you have logged in, you can access your Ubuntu remote desktop. You can close it with the exit button.

"},{"location":"openstack/create-and-connect-to-the-VM/ssh-to-the-VM/#testing-the-rdp-connection-on-linux","title":"Testing the RDP Connection on Linux","text":"

If you are using Linux as your local desktop, you can connect to the server via Remmina.

"},{"location":"openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/","title":"Bastion Host","text":"

A bastion host is a server that provides secure access to private networks over SSH from an external network, such as the Internet. We can leverage a bastion host to record all SSH sessions established with private network instances which enables auditing and can help us in efforts to comply with regulatory requirements.

The following diagram illustrates the concept of using an SSH bastion host to provide access to Linux instances running inside OpenStack cloud network.

In OpenStack, users can deploy instances in a private tenant network. In order to make these instances accessible externally via the internet, the tenant must assign each instance a Floating IP address, i.e., an external public IP. Nevertheless, users may still want a way to deploy instances without having to assign a Floating IP address to every instance.

This is useful in the context of an OpenStack project as you don't necessarily want to reserve a Floating IP for all your instances. This way you can isolate certain resources so that there is only a single point of access to them and conserve Floating IP addresses so that you don't need as big of a quota.

Leveraging an SSH bastion host allows this sort of configuration while still enabling SSH access to the private instances.

Before trying to access instances from the outside world using SSH tunneling via Bastion Host, you need to make sure you have followed these steps:

  • You followed the instruction in Create a Key Pair to set up a public ssh key. You can use the same key for both the bastion host and the remote instances, or different keys; you'll just need to ensure that the keys are loaded by ssh-agent appropriately so they can be used as needed. Please read this instruction on how to add ssh-agent and load your private key using ssh-add command to access the bastion host.

Verify you have an SSH agent running. This should match whatever you built your cluster with.

ssh-add -l\n

If you need to add the key to your agent:

ssh-add path/to/private/key\n

Now you can SSH into the bastion host:

ssh -A <user>@<bastion-floating-IP>\n
  • Your public ssh-key was selected (in the Access and Security tab) while launching the instance.

  • Add two Security Groups, one will be used by the Bastion host and another one will be used by any private instances.

i. Bastion Host Security Group:

Allow inbound SSH (optional ICMP) for this security group. Make sure you have added rules in the Security Groups to allow ssh to the bastion host.

ii. Private Instances Security Group:

You need to select \"Security Group\" in the Remote dropdown option, and then select the \"Bastion Host Security Group\" under the Security Group option as shown below (a CLI sketch of this rule follows this list):

  • Assign a Floating IP to the Bastion host instance in order to access it from outside world.
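
As a CLI sketch of the rule from item ii above (the group names are placeholders), the private instances' security group can allow SSH only from members of the bastion host's security group like this:

# Allow SSH into the private instances only from the bastion host's security group\nopenstack security group rule create --protocol tcp --dst-port 22 --remote-group bastion_secgroup private_secgroup\n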

Make a note of the Floating IP you have associated to your instance.

While adding the Bastion host and private instance, please select appropriate Security Group as shown below:

private1:

bastion_host_demo:

Finally, you'll want to configure the ProxyJump setting for the remote instances in your SSH configuration file (typically found in ~/.ssh/config). In the SSH configuration file, we can define multiple hosts with friendly names and specify custom ports, hostnames, users, etc. For example, let's say that you had a remote instance named \"private1\" and you wanted to run SSH connections through a bastion host called \"bastion\". The appropriate SSH configuration file might look something like this:

Host bastion\n  HostName 140.247.152.139\n  User ubuntu\n\nHost private1\n  Hostname 192.168.0.40\n  User ubuntu\n  ProxyJump bastion\n

ProxyJump makes it super simple to jump from one host to another totally transparently.

OR,

if you don't have keys loaded via the ssh-add command after starting ssh-agent on your local machine, you can load the private key using the IdentityFile variable in the SSH configuration file as shown below:

Host private1\n  Hostname 192.168.0.40\n  User ubuntu\n  IdentityFile ~/.ssh/cloud.key\n  ProxyJump bastion\n\nHost bastion\n  HostName 140.247.152.139\n  User ubuntu\n  IdentityFile ~/.ssh/cloud.key\n

With this configuration in place, when you type ssh private1 SSH will establish a connection to the bastion host and then through the bastion host connect to \"private1\", using the agent added keys or specified private keys.

In this sort of arrangement, SSH traffic to private servers that are not directly accessible via SSH is instead directed through a bastion host, which proxies the connection between the SSH client and the remote servers. The bastion host runs on an instance that is typically in a public subnet with attached floating public IP. Private instances are in a subnet that is not publicly accessible, and they are set up with a security group that allows SSH access from the security group attached to the underlying instance running the bastion host.

The user won't see any of this; he or she will just see a shell for \"private1\" appear. If you dig a bit further, though (try running who on the remote node), you'll see the connections are coming from the bastion host, not the original SSH client.

"},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/","title":"OpenVPN","text":"

OpenVPN is a full-featured SSL VPN which implements OSI layer 2 or 3 secure network extension using the industry standard SSL/TLS protocol, supports flexible client authentication methods based on certificates, smart cards, and/or username/password credentials, and allows user or group-specific access control policies using firewall rules applied to the VPN virtual interface.

OpenVPN offers a scalable client/server mode, allowing multiple clients to connect to a single OpenVPN server process over a single TCP or UDP port.

"},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/#installing-openvpn-server","title":"Installing OpenVPN Server","text":"

You can read the official documentation here.

You can spin up a new instance with \"ubuntu-22.04-x86_64\" or any available Ubuntu OS image, named \"openvpn_server\" on OpenStack, with \"default\" and \"ssh_only\" Security Groups attached to it.

Also, attach a Floating IP to this instance so you can ssh into it from outside.

Create a new Security Group i.e. \"openvpn\" that is listening on UDP port 1194 as shown below:
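
If you prefer the OpenStack CLI, an equivalent security group and rule can be sketched as follows (the group name matches the example above; adjust as needed):

# Create the security group and open UDP port 1194 to the world\nopenstack security group create openvpn\nopenstack security group rule create --protocol udp --dst-port 1194 --remote-ip 0.0.0.0/0 openvpn\n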

The Security Groups attached to the OpenVPN server include \"default\", \"ssh_only\", and \"openvpn\". It should look similar to the image shown below:

Finally, you'll want to configure the settings for the remote instances in your SSH configuration file (typically found in ~/.ssh/config). The SSH configuration file might include an entry for your newly created OpenVPN server like this:

Host openvpn\n  HostName 199.94.60.66\n  User ubuntu\n  IdentityFile ~/.ssh/cloud.key\n
  1. Then you can ssh into the OpenVPN Server running: ssh openvpn

  2. Also note that OpenVPN must be installed and run by a user who has administrative/root privileges. So, we need to run the command: sudo su

  3. We are using this repo to install OpenVPN server on this ubuntu server.

    For that, run the script and follow the assistant:

    wget https://git.io/vpn -O openvpn-install.sh && bash openvpn-install.sh\n

You can press Enter to accept all default values. While entering a name for the first client, you can give \"nerc\" as the client name; this will generate a new configuration file (.ovpn file) named \"nerc.ovpn\". The config file is named after the client name you enter, with a .ovpn extension.

4. Copy the generated config file from \"/root/nerc.ovpn\" to \"/home/ubuntu/nerc.ovpn\" by running: cp /root/nerc.ovpn .

  5. Update the ownership of the config file to ubuntu user and ubuntu group by running the following command: chown ubuntu:ubuntu nerc.ovpn

6. You can exit from the root and ssh sessions altogether and then copy the configuration file to your local machine by running the following script on your local machine's terminal: scp openvpn:nerc.ovpn .

  7. "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/#to-add-a-new-client-user","title":"To add a new client user","text":"

    Once it ends, you can run it again to add more users, remove some of them or even completely uninstall OpenVPN.

    For this, run the script and follow the assistant:

    wget https://git.io/vpn -O openvpn-install.sh && bash openvpn-install.sh\n

Here, you give the client name as \"mac_client\", which will generate a new configuration file at \"/root/mac_client.ovpn\". You can repeat steps 4 to 6 above to copy this new client's configuration file and share it with the new client.

    Important Note

You need to contact your project administrator to get your own OpenVPN configuration file (file with .ovpn extension). Download it and keep it on your local machine so that you can use this client configuration profile in the next steps.

An OpenVPN client or compatible software is needed to connect to the OpenVPN server. Please install one of these clients depending on your device. The client program must be configured with a client profile to connect to the OpenVPN server.

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/#windows","title":"Windows","text":"

    OpenVPN source code and Windows installers can be downloaded here. The OpenVPN executable should be installed on both server and client machines since the single executable provides both client and server functions. Please see the OpenVPN client setup guide for Windows.

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/#mac-os-x","title":"Mac OS X","text":"

    The client we recommend and support for Mac OS is Tunnelblick. To install Tunnelblick, download the dmg installer file from the Tunnelblick site, mount the dmg, and drag the Tunnelblick application to Applications. Please refer to this guide for more information.

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/#linux","title":"Linux","text":"

    OpenVPN is available through the package management system on most Linux distributions.

    On Debian/Ubuntu:

    sudo apt-get install openvpn\n

    On RedHat/Rocky/AlmaLinux:

    sudo dnf install openvpn\n

    Then, to run OpenVPN using the client profile:

    Move the VPN client profile (configuration) file to /etc/openvpn/ :

    sudo mv nerc.ovpn /etc/openvpn/client.conf\n

    Restart the OpenVPN daemon (i.e., This will start OpenVPN connection and will automatically run on boot):

    sudo /etc/init.d/openvpn start\n

    OR,

    sudo systemctl enable --now openvpn@client\nsudo systemctl start openvpn@client\n

    Checking the status:

    systemctl status openvpn@client\n

    Alternatively, if you want to run OpenVPN manually each time, then run:

sudo openvpn --config /etc/openvpn/client.conf\n

    OR,

    sudo openvpn --config nerc.ovpn\n
    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/openvpn_gui_for_windows/","title":"OpenVPN-GUI","text":"

    Official OpenVPN Windows installers include a Windows OpenVPN-GUI, which allows managing OpenVPN connections from a system tray applet.

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/openvpn_gui_for_windows/#find-your-client-account-credentials","title":"Find your client account credentials","text":"

You need to contact your project administrator to get your own OpenVPN configuration file (file with .ovpn extension). Download it and keep it on your local machine so that you can use this client configuration profile in the next steps.

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/openvpn_gui_for_windows/#download-and-install-openvpn-gui","title":"Download and install OpenVPN-GUI","text":"
    1. Download the OpenVPN client installer:

OpenVPN for Windows can be installed from the self-installing exe file on the OpenVPN download page. Also note that OpenVPN must be installed and run by a user who has administrative privileges (this restriction is imposed by Windows, not OpenVPN).

    2. Launch the installer and follow the prompts as directed.

3. Clicking the \"Customize\" button shows the settings and features of the OpenVPN GUI client.

    4. Click \"Install Now\" to continue.

5. Click the \"Close\" button.

6. For the newly installed OpenVPN GUI, there will be no configuration profile for the client, so it will show a pop-up alert:

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/openvpn_gui_for_windows/#set-up-the-vpn-with-openvpn-gui","title":"Set up the VPN with OpenVPN GUI","text":"

    After you've run the Windows installer, OpenVPN is ready for use and will associate itself with files having the .ovpn extension.

    1. You can use the previously downloaded .ovpn file from your Downloads folder to setup the connection profiles.

a. You can right-click on the OpenVPN configuration file (.ovpn) and select \"Start OpenVPN on this config file\":

      b. OR, you can use \"Import file\u2026\" menu to select the previously downloaded .ovpn file.

      Once, done it will show:

      c. OR, you can manually copy the config file to one of OpenVPN's configuration directories:

      C:\\Program Files\\OpenVPN\\config (global configs)\nC:\\Program Files\\OpenVPN\\config-auto (autostarted global configs)\n%USERPROFILE%\\OpenVPN\\config (per-user configs)\n
    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/openvpn_gui_for_windows/#connect-to-a-vpn-server-location","title":"Connect to a VPN server location","text":"

To launch OpenVPN connections, click on the OpenVPN GUI (tray applet). OpenVPN GUI is used to launch VPN connections on demand. It is a system-tray applet, so an icon for the GUI will appear in the lower-right corner of the screen in the taskbar notification area. Right-click on the system tray icon; if you have multiple configurations, a menu will appear showing the names of your OpenVPN configuration profiles and giving you the option to connect. If you have only one configuration, you can just click on the \"Connect\" menu.

When you are connected to the OpenVPN server successfully, you will see a popup message as shown below. That's it! You are now connected to a VPN.

Once you are connected to the OpenVPN server, you can run commands like the one shown below in your terminal to connect to the private instances: ssh ubuntu@192.168.0.40 -A -i cloud.key

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/openvpn_gui_for_windows/#disconnect-vpn-server","title":"Disconnect VPN server","text":"

To disconnect, right-click on the system tray icon in your status bar and select Disconnect from the menu.

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/tunnelblick_for_macos/","title":"Tunnelblick","text":"

Tunnelblick is a free, open-source GUI (graphical user interface) for OpenVPN on macOS and OS X; more details can be found here. It provides access to a VPN server: your computer is one end of the tunnel and the VPN server is the other end.

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/tunnelblick_for_macos/#find-your-client-account-credentials","title":"Find your client account credentials","text":"

You need to contact your project administrator to get your own OpenVPN configuration file (file with .ovpn extension). Download it and keep it on your local machine so that you can use this client configuration profile in the next steps.

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/tunnelblick_for_macos/#download-and-install-tunnelblick","title":"Download and install Tunnelblick","text":"
    1. Download Tunnelblick, a free and user-friendly app for managing OpenVPN connections on macOS.

    2. Navigate to your Downloads folder and double-click the Tunnelblick installation file (.dmg installer file) you have just downloaded.

    3. In the window that opens, double-click on the Tunnelblick icon.

    4. A new dialogue box will pop up, asking you if you are sure you want to open the app. Click Open.

    5. You will be asked to enter your device password. Enter it and click OK:

    6. Select Allow or Don't Allow for your notification preference.

    7. Once the installation is complete, you will see a pop-up notification asking you if you want to launch Tunnelblick now. (An administrator username and password will be required to secure Tunnelblick). Click Launch.

      Alternatively, you can click on the Tunnelblick icon in the status bar and select VPN Details...:

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/tunnelblick_for_macos/#set-up-the-vpn-with-tunnelblick","title":"Set up the VPN with Tunnelblick","text":"
    1. A new dialogue box will appear. Click I have configuration files.

    2. Another notification will pop-up, instructing you how to import configuration files. Click OK.

    3. Drag and drop the previously downloaded .ovpn file from your Downloads folder to the Configurations tab in Tunnelblick.

      OR,

      You can just drag and drop the provided OpenVPN configuration file (file with .ovpn extension) directly to Tunnelblick icon in status bar at the top-right corner of your screen.

    4. A pop-up will appear, asking you if you want to install the configuration profile for your current user only or for all users on your Mac. Select your preferred option. If the VPN is intended for all accounts on your Mac, select All Users. If the VPN will only be used by your current account, select Only Me.

    5. You will be asked to enter your Mac password.

Then the screen reads \"Tunnelblick successfully installed one configuration\".

    You can see the configuration setting is loaded and installed successfully.

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/tunnelblick_for_macos/#connect-to-a-vpn-server-location","title":"Connect to a VPN server location","text":"
    1. To connect to a VPN server location, click the Tunnelblick icon in status bar at the top-right corner of your screen.

2. From the drop-down menu, select the server and click Connect [name of the .ovpn configuration file].

Alternatively, you can select \"VPN Details\" from the menu and then click the \"Connect\" button:

      This will show the connection log on the dialog:

3. When you are connected to the OpenVPN server successfully, you will see a popup message as shown below. That's it! You are now connected to a VPN.

    4. Once you are connected to the OpenVPN server, you can run commands like shown below to connect to the private instances:

      ssh ubuntu@192.168.0.40 -A -i cloud.key\n

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/openvpn/tunnelblick_for_macos/#disconnect-vpn-server","title":"Disconnect VPN server","text":"

    To disconnect, click on the Tunnelblick icon in your status bar and select Disconnect in the drop-down menu.

While disconnecting, the log will be shown in a popup as shown below:

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/","title":"sshuttle","text":"

sshuttle is a lightweight SSH-encrypted VPN. It is a Python-based script that allows you to tunnel connections through SSH in a far more efficient way than traditional ssh proxying.

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/#installing-sshuttle-server","title":"Installing sshuttle Server","text":"

    You can spin up a new instance with \"ubuntu-22.04-x86_64\" or any available Ubuntu OS image, named \"sshuttle_server\" on OpenStack, with \"default\" and \"ssh_only\" Security Groups attached to it.

    Also, attach a Floating IP to this instance so you can ssh into it from outside.

Finally, you'll want to configure the settings for the remote instances in your SSH configuration file (typically found in ~/.ssh/config). The SSH configuration file might include an entry for your newly created sshuttle server like this:

    Host sshuttle\n\n  HostName 140.247.152.244\n  User ubuntu\n  IdentityFile ~/.ssh/cloud.key\n
    1. Then you can ssh into the sshuttle Server running: ssh sshuttle

    Note

    Unlike other VPN servers, for sshuttle you don't need to install anything on the server side. As long as you have an SSH server (with python3 installed) you're good to go.

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/#to-connect-from-a-new-client-install-sshuttle","title":"To connect from a new client Install sshuttle","text":""},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/#windows","title":"Windows","text":"

Currently there is no built-in support for running sshuttle directly on Microsoft Windows. What you can do is create a Linux VM with Vagrant (or simply VirtualBox if you like) and then connect via that VM. For more details, read here.

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/#mac-os-x","title":"Mac OS X","text":"

    Install using Homebrew:

    brew install sshuttle\n

    OR, via MacPorts

    sudo port selfupdate\nsudo port install sshuttle\n
    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/#linux","title":"Linux","text":"

    sshuttle is available through the package management system on most Linux distributions.

    On Debian/Ubuntu:

    sudo apt-get install sshuttle\n

    On RedHat/Rocky/AlmaLinux:

    sudo dnf install sshuttle\n

    It is also possible to install into a virtualenv as a non-root user.

    • From PyPI:
    virtualenv -p python3 /tmp/sshuttle\n. /tmp/sshuttle/bin/activate\npip install sshuttle\n
    • Clone:
virtualenv -p python3 /tmp/sshuttle\n. /tmp/sshuttle/bin/activate\ngit clone https://github.com/sshuttle/sshuttle.git\ncd sshuttle\n./setup.py install\n
    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/#how-to-connect","title":"How to Connect","text":"

    Tunnel to all networks (0.0.0.0/0):

sshuttle -r ubuntu@140.247.152.244 0.0.0.0/0\n

    OR, shorthand:

    sudo sshuttle -r ubuntu@140.247.152.244 0/0\n

If you would also like your DNS queries to be proxied through the DNS server of the server you are connected to:

    sshuttle --dns -r ubuntu@140.247.152.244 0/0\n

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/wireguard/","title":"WireGuard","text":"

    WireGuard is an extremely simple yet fast and modern VPN that utilizes state-of-the-art cryptography.

    Here's what it will look like:

    "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/wireguard/#installing-wireguard-server","title":"Installing WireGuard Server","text":"

    You can spin up a new instance with \"ubuntu-22.04-x86_64\" or any available Ubuntu OS image, named \"wireguard_server\" on OpenStack, with \"default\" and \"ssh_only\" Security Groups attached to it.

    Also, attach a Floating IP to this instance so you can ssh into it from outside.

    Create a new Security Group i.e. \"wireguard\" that is listening on UDP port 51820 as shown below:

    The Security Groups attached to the WireGuard server includes \"default\", \"ssh_only\" and \"wireguard\". It should look similar to the image shown below:

Finally, you'll want to configure the settings for the remote instances in your SSH configuration file (typically found in ~/.ssh/config). The SSH configuration file might include an entry for your newly created WireGuard server like this:

    Host wireguard\n  HostName 140.247.152.188\n  User ubuntu\n  IdentityFile ~/.ssh/cloud.key\n
    1. Then you can ssh into the WireGuard Server running: ssh wireguard

    2. Also note that WireGuard must be installed and run by a user who has administrative/root privileges. So, we need to run the command: sudo su

    3. We are using this repo to install WireGuard server on this ubuntu server.

      For that, run the script and follow the assistant:

      wget https://git.io/wireguard -O wireguard-install.sh && bash wireguard-install.sh\n

You can press Enter to accept all default values. While entering a name for the first client, you can give \"nerc\" as the client name; this will generate a new configuration file (.conf file) named \"nerc.conf\". The config file is named after the client name you enter, with a .conf extension.

NOTE: For each peer, the client configuration file complies with the following template:
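
As an illustrative sketch, a typical wg-quick client configuration generated by such install scripts looks roughly like this (the keys, addresses, and endpoint shown are placeholders filled in by the installer):

[Interface]\nPrivateKey = <client_private_key>\nAddress = 10.7.0.2/24\nDNS = 1.1.1.1\n\n[Peer]\nPublicKey = <server_public_key>\nPresharedKey = <preshared_key>\nAllowedIPs = 0.0.0.0/0\nEndpoint = <server_floating_ip>:51820\nPersistentKeepalive = 25\n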

    4. Copy the generated config file from \"/root/nerc.conf\" to \"/home/ubuntu/nerc.conf\" by running: cp /root/nerc.conf .

    5. Update the ownership of the config file to ubuntu user and ubuntu group by running the following command: chown ubuntu:ubuntu nerc.conf

    6. You can exit from the root and ssh session all together and then copy the configuration file to your local machine by running the following script on your local machine's terminal: scp wireguard:nerc.conf .

    7. "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/wireguard/#to-add-a-new-client-user","title":"To add a new client user","text":"

      Once it ends, you can run it again to add more users, remove some of them or even completely uninstall WireGuard.

      For this, run the script and follow the assistant:

      wget https://git.io/wireguard -O wireguard-install.sh && bash wireguard-install.sh\n

Here, you give the client name as \"mac_client\", which will generate a new configuration file at \"/root/mac_client.conf\". You can repeat steps 4 to 6 above to copy this new client's configuration file and share it with the new client.

      "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/wireguard/#authentication-mechanism","title":"Authentication Mechanism","text":"

      It would be kind of pointless to have our VPN server allow anyone to connect. This is where our public & private keys come into play.

      • Each client's public key needs to be added to the SERVER'S configuration file

      • The server's public key needs to be added to the CLIENT'S configuration file

      "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/wireguard/#useful-commands","title":"Useful commands","text":"

      To view the server config: wg show or wg

      To activate a config: wg-quick up /path/to/file_name.conf

      To deactivate a config: wg-quick down /path/to/file_name.conf

      Read more:

      https://git.zx2c4.com/wireguard-tools/about/src/man/wg.8

      https://git.zx2c4.com/wireguard-tools/about/src/man/wg-quick.8

      Important Note

You need to contact your project administrator to get your own WireGuard configuration file (file with .conf extension). Download it and keep it on your local machine so that you can use this client configuration profile in the next steps.

A WireGuard client or compatible software is needed to connect to the WireGuard VPN server. Please install one of these clients depending on your device. The client program must be configured with a client profile to connect to the WireGuard VPN server.

      "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/wireguard/#windows","title":"Windows","text":"

      WireGuard client can be downloaded here. The WireGuard executable should be installed on client machines. After the installation, you should see the WireGuard icon in the lower-right corner of the screen located at the taskbar notification area.

      "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/wireguard/#set-up-the-vpn-with-wireguard-gui","title":"Set up the VPN with WireGuard GUI","text":"

      Next, we configure the VPN tunnel. This includes setting up the endpoints and exchanging the public keys.

      Open the WireGuard GUI and either click on Add Tunnel -> Import tunnel(s) from file\u2026 OR,

      click on \"Import tunnel(s) from file\" button located at the center.

      The software automatically loads the client configuration. Also, it creates a public key for this new tunnel and displays it on the screen.

Either right-click on your tunnel name and select the \"Edit selected tunnel\u2026\" menu, OR click on the \"Edit\" button at the lower left.

      Checking Block untunneled traffic (kill-switch) will make sure that all your traffic is being routed through this new VPN server.

      "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/wireguard/#test-your-connection","title":"Test your connection","text":"

      On your Windows machine, press the \"Activate\" button. You should see a successful connection be made:

      After a few seconds, the status should change to Active.

      If the connection is routed through the VPN, it should show the IP address of the WireGuard server as the public address.

If that's not the case, check the \"Log\" tab to troubleshoot, and verify and validate the client and server configuration.

Clicking the \"Deactivate\" button closes the VPN connection.

      "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/wireguard/#mac-os-x","title":"Mac OS X","text":""},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/wireguard/#i-using-homebrew","title":"I. Using HomeBrew","text":"

This allows more than one WireGuard tunnel to be active at a time, unlike the WireGuard GUI app.

      1. Install WireGuard CLI on macOS through brew: brew install wireguard-tools

2. Copy the \".conf\" file to \"/usr/local/etc/wireguard/\" (or \"/etc/wireguard/\"). You'll need to create the \"wireguard\" directory first. For this example, your config file will be located at \"/usr/local/etc/wireguard/mac_client.conf\" or \"/etc/wireguard/mac_client.conf\".

3. To activate the VPN: \"wg-quick up [name of the conf file without the .conf extension]\". For example, in this case, run wg-quick up mac_client. If the peer system is already configured and its interface is up, the VPN connection should be established automatically, and you should be able to start routing traffic through the peer.

      Use wg-quick down mac_client to take the VPN connection down.

      "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/wireguard/#ii-using-wireguard-gui-app","title":"II. Using WireGuard GUI App","text":"
      1. Download WireGuard Client from the macOS App Store

        You can find the official WireGuard Client app on the App Store here.

      2. Set up the VPN with WireGuard

        Next, we configure the VPN tunnel. This includes setting up the endpoints and exchanging the public keys.

        Open the WireGuard GUI by directly clicking WireGuard icon in status bar at the top-right corner of your screen.

        And then click on \"Import tunnel(s) from file\" menu to load your client config file.

        OR,

Find and click the WireGuard GUI from your Launchpad and then either click on Add Tunnel -> Import tunnel(s) from file\u2026 or just click on the \"Import tunnel(s) from file\" button located at the center.

        Browse to the configuration file:

        The software automatically loads the client configuration. Also, it creates a public key for this new tunnel and displays it on the screen.

        If you would like your computer to automatically connect to the WireGuard VPN server as soon as either (or both) Ethernet or Wi-Fi network adapter becomes active, check the relevant 'On-Demand' checkboxes for \"Ethernet\" and \" Wi-Fi\".

        Checking Exclude private IPs will generate a list of networks which excludes the server IP address and add them to the AllowedIPs list. This setting allows you to pass all your traffic through your Wireguard VPN EXCLUDING private address ranges like 10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16.

      3. Test your connection

On your Mac, press the \"Activate\" button. You should see a successful connection be made:

        After a few seconds, the status should change to Active.

Clicking the \"Deactivate\" button in the GUI's interface, or clicking the \"Deactivate\" menu item from the WireGuard icon in the status bar at the top-right corner of your screen, closes the VPN connection.

      "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/wireguard/#linux","title":"Linux","text":"

      WireGuard is available through the package management system on most Linux distributions.

      On Debian/Ubuntu:

      sudo apt update\nsudo apt-get install wireguard resolvconf -y\n

      On RedHat/Rocky/AlmaLinux:

      sudo dnf install wireguard\n

      Then, to run WireGuard using the client profile: Move the VPN client profile (configuration) file to /etc/wireguard/:

      sudo mv nerc.conf /etc/wireguard/client.conf\n

      Restart the WireGuard daemon (i.e., This will start WireGuard connection and will automatically run on boot):

      sudo /etc/init.d/wireguard start\n

      OR,

      sudo systemctl enable --now wg-quick@client\nsudo systemctl start wg-quick@client\n

      OR,

      wg-quick up /etc/wireguard/client.conf\n

      Checking the status:

      systemctl status wg-quick@client\n

      Alternatively, if you want to run WireGuard manually each time, then run:

sudo wg-quick up /etc/wireguard/client.conf\n

      OR,

sudo wg-quick up ./nerc.conf\n
      "},{"location":"openstack/create-and-connect-to-the-VM/using-vpn/wireguard/#to-test-the-connection","title":"To test the connection","text":"

Once you are connected to the WireGuard server, you can run commands like the one shown below in your terminal to connect to the private instances: ssh ubuntu@192.168.0.40 -A -i cloud.key

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/","title":"Data Transfer To/From NERC VM","text":""},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#transfer-using-volume","title":"Transfer using Volume","text":"

You may wish to transfer a volume, including all its data, to a different project, which can be your own (accessible in the project dropdown list) or that of external collaborators within NERC. For this, you can follow this guide.

      Very Important Note

If you transfer the volume, it will be removed from the source project and will only be available in the destination project.

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#using-globus","title":"Using Globus","text":"

      Globus is a web-based service that is the preferred method for transferring substantial data between NERC VM and other locations. It effectively tackles the typical obstacles researchers encounter when moving, sharing, and storing vast quantities of data. By utilizing Globus, you can delegate data transfer tasks to a managed service that oversees the entire process. This service monitors performance and errors, retries failed transfers, automatically resolves issues whenever feasible, and provides status updates to keep you informed. This allows you to concentrate on your research while relying on Globus to handle data movement efficiently. For information on the user-friendly web interface of Globus and its flexible REST/API for creating scripted tasks and operations, please visit Globus.org.

      Important Information

      For large data sets and/or for access by external users, consider using Globus. An institutional endpoint/collection is not required to use Globus - you can set up a personal endpoint on your NERC VM and also on your local machine if you need to transfer large amounts of data.

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#setting-up-a-personal-globus-endpoint-on-nerc-vm","title":"Setting up a Personal Globus Endpoint on NERC VM","text":"

You can do this using Globus Connect Personal to configure an endpoint on your NERC VM. In general, it is always fastest to set up a Personal endpoint on your NERC VM, and then use that endpoint for transfers to/from a local machine or any other shared or private Globus endpoints.

      You can find instructions for downloading and installing the Globus Connect Personal on the Globus web site.

      Helpful Tip

You may get a \"Permission Denied\" error for certain paths with Globus Connect Personal. If you do, you may need to add the path to your list of allowed paths for Globus Connect Personal. You can do this by editing the ~/.globusonline/lta/config-paths file and adding the new path as a line at the end of the list. The path must be followed by sharing (0/1) and R/W (0/1) flags.

For example, to enable read-write access to the /data/tables directory, add the following line: /data/tables,0,1.
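
For illustration, a config-paths file containing the home directory plus that extra path might then look like this (each line is path,sharing,R/W):

~/,0,1\n/data/tables,0,1\n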

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#usage-of-globus","title":"Usage of Globus","text":"

Once a Personal Endpoint is set up on a NERC VM, you will be able to find that named collection in the Globus file explorer, and it can then be chosen as the source or destination for data transfer to/from another Guest Collection (Globus Shared Endpoints). Log in to the Globus web interface, select your organization (which will allow you to log in to Globus), and land on the File Manager page.

If your account belongs to a Globus Subscription, you will be able to transfer data between two personal endpoints, i.e., you can set up your local machine as another personal endpoint.

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#using-scp","title":"Using SCP","text":"

      Important Information

      SCP is suggested for smaller files (<~10GB), otherwise use Globus. When you want to transfer many small files in a directory, we recommend Globus.

We generally recommend using SCP (Secure Copy) to copy data to and from your VM. SCP is used to securely transfer files between two hosts using the Secure Shell (ssh) protocol. Its usage is simple, but the order in which file locations are specified is crucial. SCP always expects the 'from' location first, then the 'to' destination. Depending on which is the remote system, you will prefix your username and the Floating IP of your NERC VM.

      scp [username@Floating_IP:][location of file] [destination of file]

      or,

      scp [location of file] [username@Floating_IP:][destination of file]

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#usage","title":"Usage","text":"

      Below are some examples of the two most common scenarios of SCP to copy to and from various sources.

      Helpful Tip

We use '~' in the examples. The tilde '~' is a Unix shorthand that means \"my home directory\". So if user almalinux uses ~/, this is the same as typing out the full path to the almalinux user's home directory (easier to remember than /home/almalinux/). You can, of course, specify other paths (e.g. /home/almalinux/output/files.zip). Also, we use . in the examples to specify the current directory path from where the command is issued. This can be replaced with the actual path.

      i. Copying Files From the NERC VM to Another Computer:

From a terminal/shell on your local machine, you'll issue your SCP command, specifying the SSH Private Key that corresponds to the SSH Public Key added to the VM. The syntax is:

      scp -i <Your SSH Private Key including Path> <Default User name based on OS>@<Your Floating IP of VM>:~/<File In VM> .\n

This copies the file <File In VM> from your VM's default user's home directory (~ is a Unix shortcut for my home directory) on your VM to the current directory (. is a Unix shortcut for the current directory) on the computer from where the command is issued, or you can specify an actual path instead of ..

For example:

      scp -i ~/.ssh/your_pem_key_file.pem almalinux@199.94.60.219:~/myfile.zip /my_local_directory/\n

      ii. Copying Files From Another Computer to the NERC VM:

      From a terminal/shell on your computer (or another server or cluster) where you have access to the SSH Private Key, you'll issue your SCP command. The syntax is:

scp -i <Your SSH Private Key including Path> ./<Your Local File> <Default User name based on OS>@<Your Floating IP of VM>:~/\n

      This copies the file <Your Local File> from the current directory on the computer you issued the command from, to your home directory on your NERC VM. (recall that . is a Unix shortcut for the current directory path and ~ is a Unix shortcut for my home directory)

For example:

      scp -i ~/.ssh/your_pem_key_file.pem ./myfile.zip almalinux@199.94.60.219:~/myfile.zip\n

      Important Note

      While it\u2019s probably best to compress all the files you intend to transfer into one file, this is not always an option. To copy the contents of an entire directory, you can use the -r (for recursive) flag.

For example:

      scp -i ~/.ssh/your_pem_key_file.pem -r almalinux@<Floating_IP>:~/mydata/ ./destination_directory/\n

This copies all the files from ~/mydata/ on your NERC VM to the current directory (i.e. .) on the computer you issued the command from. Here we can replace ./ with the actual full path on your local machine and ~/ with the actual full path on your NERC VM.

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#using-tarssh","title":"Using tar+ssh","text":"

      When you want to transfer many small files in a directory, we recommend Globus. If you don't wish to use Globus, you can consider using ssh piped with tar.

      i. Send a directory to NERC VM:

      tar cz /local/path/dirname | ssh -i <Your SSH Private Key including Path> <Default User name based on OS>@<Your Floating IP of VM> tar zxv -C /remote/path\n

      ii. Get a directory from NERC VM:

      ssh -i <Your SSH Private Key including Path> <Default User name based on OS>@<Your Floating IP of VM> tar cz /remote/path/dirname | tar zxv -C /local/path\n
      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#using-rsync","title":"Using rsync","text":"

      Rsync is a fast, versatile, remote (and local) file-copying tool. It is famous for its delta-transfer algorithm, which reduces the amount of data sent over the network by sending only the differences between the source files and the existing files in the destination. This can often lead to efficiencies in repeat-transfer scenarios, as rsync only copies files that are different between the source and target locations (and can even transfer partial files when only part of a file has changed). This can be very useful in reducing the amount of copies you may perform when synchronizing two datasets.

The basic syntax is: rsync SOURCE DESTINATION where SOURCE and DESTINATION are filesystem paths. They can be local, either absolute or relative to the current working directory, or they can be remote by prefixing something like USERNAME@HOSTNAME: to the front of them.

      i. Synchronizing from a local machine to NERC VM:

      rsync -avxz ./source_directory/ -e \"ssh -i ~/.ssh/your_pem_key_file.pem\" <user_name>@<Floating_IP>:~/destination_directory/\n

      ii. Synchronizing from NERC VM to a local machine:

      rsync -avz -e \"ssh -i ~/.ssh/your_pem_key_file.pem\" -r <user_name>@<Floating_IP>:~/source_directory/ ./destination_directory/\n

      iii. Update a previously made copy of \"foo\" on the NERC VM after you\u2019ve made changes to the local copy:

      rsync -avz --delete foo/ -e \"ssh -i ~/.ssh/your_pem_key_file.pem\" <user_name>@<Floating_IP>:~/foo/\n

      Be careful with this option!

      The --delete option has no effect when making a new copy, and therefore can be used in the previous example too (making the commands identical), but since it recursively deletes files, it\u2019s best to use it sparingly. If you want to maintain a mirror (i.e. the DESTINATION is to be an exact copy of the SOURCE) then you will want to add the --delete option. This deletes files/directories in the DESTINATION that are no longer in the SOURCE.

      iv. Update a previously made copy of \"foo\" on the NERC VM after you or someone else has already updated it from a different source:

      rsync -aAvz --update foo/ -e \"ssh -i ~/.ssh/your_pem_key_file.pem\" <user_name>@<Floating_IP>:~/foo/\n

      Information

      The --update option has no effect when making a new copy and can also be specified in that case. If you're updating a master copy (i.e. the DESTINATION may have files that are newer than the version(s) in SOURCE) then you will also want to add the --update option. This will leave those files alone and not revert them to the older copy in SOURCE.

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#progress-verbosity-statistics","title":"Progress, Verbosity, Statistics","text":"

      -v Verbose mode \u2014 list each file transferred. Adding more vs makes it more verbose.

      --progress Show a progress meter for each individual file transfer that is part of the entire operation. If you have many small files then this option can significantly slow down the transfer.

      --stats Print a short paragraph of statistics at the end of the session (e.g. average transfer rate, total number of files transferred, etc).

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#other-useful-options","title":"Other Useful Options","text":"

      --dry-run Perform a dry-run of the session instead of actually modifying the DESTINATION. Mostly useful when adding multiple -v options, especially for verifying --delete is doing what you want.

--exclude PATTERN Skip files/directories in the SOURCE that match a given pattern (supports wildcard patterns).
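
For instance, a hedged sketch that combines these flags with the earlier rsync examples (the key file, user name, Floating IP, and directory names are placeholders) to preview a transfer that skips temporary files and prints statistics:

rsync -avz --dry-run --stats --exclude '*.tmp' -e \"ssh -i ~/.ssh/your_pem_key_file.pem\" ./source_directory/ <user_name>@<Floating_IP>:~/destination_directory/\n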

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#using-rclone","title":"Using Rclone","text":"

      rclone is a convenient and performant command-line tool for transferring files and synchronizing directories directly between your local file systems and a given NERC VM.

      Prerequisites:

      To run the rclone commands, you need to have:

• rclone installed. See Downloading and Installing the latest version of Rclone for more information.
      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#configuring-rclone","title":"Configuring Rclone","text":"

      First you'll need to configure rclone. The filesystem protocols, especially, can have complicated authentication parameters so it's best to place these details in a config file.

      If you run rclone config file you will see where the default location is for your current user.

      Note

      For Windows users, you may need to specify the full path to the Rclone executable file if it's not included in your system's %PATH% variable.

Edit the config file at the path location reported by the rclone config file command and add the following entry with the name [nerc]:

      [nerc]\ntype = sftp\nhost = 199.94.60.219\nuser = almalinux\nport =\npass =\nkey_file = C:\\Users\\YourName\\.ssh\\cloud.key\nshell_type = unix\n

      More about the config for SFTP can be found here.

Or, you can locally copy this content to a new config file and then use this flag to override the config location, e.g. rclone --config=FILE

      Interactive Configuration

      Run rclone config to setup. See Rclone config docs for more details.

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#how-to-use-rclone","title":"How to use Rclone","text":"

      rclone supports many subcommands (see the complete list of Rclone subcommands). A few commonly used subcommands (assuming you configured the NERC VM filesystem as nerc):

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#listing-files-and-folders","title":"Listing Files and Folders","text":"

Once your NERC VM filesystem has been configured in Rclone, you can then use the Rclone interface to list all the directories with the \"lsd\" command:

      rclone lsd \"nerc:\"\n

      or,

      rclone lsd \"nerc:\" --config=rclone.conf\n

For example:

      rclone lsd \"nerc:\" --config=rclone.conf\n        -1 2023-07-06 12:18:24        -1 .ssh\n        -1 2023-07-06 19:27:19        -1 destination_directory\n

      To list the files and folders available within the directory (i.e. \"destination_directory\") we can use the \"ls\" command:

      rclone ls \"nerc:destination_directory/\"\n  653 README.md\n    0 image.png\n   12 test-file\n
      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#uploading-and-downloading-files-and-folders","title":"Uploading and Downloading Files and Folders","text":"

rclone supports a variety of options to allow you to copy, sync, and move files from one location to another.

      A simple example of this can be seen below where we copy/upload the file upload.me to the <your-directory> directory:

      rclone copy \"./upload.me\" \"nerc:<your-directory>/\"\n

      Another example, to copy/download the file upload.me from the remote directory, <your-directory>, to your local machine:

      rclone -P copy \"nerc:<your-directory>/upload.me\" \"./\"\n

Also, to sync files into the <your-directory> directory, it's recommended to try with --dry-run first. This will give you a preview of what would be synced without actually performing any transfers.

      rclone --dry-run sync /path/to/files nerc:<your-directory>\n

      Then sync for real

      rclone sync --interactive /path/to/files nerc:<your-directory>\n
      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#mounting-vm-filesystem-on-local-filesystem","title":"Mounting VM filesystem on local filesystem","text":"

      Linux:

      First, you need to create a directory on which you will mount your filesystem:

      mkdir ~/mnt-rclone

      Then you can simply mount your filesystem with:

      rclone -vv --vfs-cache-mode writes mount nerc: ~/mnt-rclone

      Windows:

      First you have to download Winfsp:

      WinFsp is an open source Windows File System Proxy which provides a FUSE emulation layer.

      Then you can simply mount your VM's filesystem with (no need to create the directory in advance):

      rclone -vv --vfs-cache-mode writes mount nerc: C:/mnt-rclone

      The vfs-cache-mode flag enables file caching. You can use either the writes or full option. For further explanation you can see the official documentation.

      Now that your VM's filesystem is mounted locally, you can list, create, and delete files in it.

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#unmount-nerc-vm-filesystem","title":"Unmount NERC VM filesystem","text":"

      To unmount, simply press CTRL-C and the mount will be interrupted.
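
If the mount was left running in the background or the terminal is no longer available, a minimal sketch for unmounting on Linux (assuming the standard fusermount utility that ships with FUSE is available) is:

fusermount -u ~/mnt-rclone\n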

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#using-graphical-user-interface-gui-tools","title":"Using Graphical User Interface (GUI) Tools","text":""},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#i-winscp","title":"i. WinSCP","text":"

      WinSCP is a popular and free open-source SFTP client, SCP client, and FTP client for Windows. Its main function is file transfer between a local and a remote computer, with some basic file management functionality using FTP, FTPS, SCP, SFTP, WebDAV, or S3 file transfer protocols.

      Prerequisites:

      • WinSCP installed, see Download and Install the latest version of the WinSCP for more information.

      • Go to WinSCP menu and open \"View > Preferences\".

      • When the \"Preferences\" dialog window appears, select \"Transfer\" in the options on the left pane.

      • Click on the \"Edit\" button.

      • Then, in the popup dialog box, review the \"Common options\" group and uncheck the \"Preserve timestamp\" option as shown below:

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#configuring-winscp","title":"Configuring WinSCP","text":"
      • Click on the \"New Tab\" button as shown below:
      • Select either \"SFTP\" or \"SCP\" from the \"File protocol\" dropdown options as shown below:
      • Provide the following required information:

\"File protocol\": Choose either \"SFTP\" or \"SCP\"

      \"Host name\": \"<Your Floating IP of VM>\"

      \"Port number\": \"22\"

      \"User name\": \"<Default User name based on OS>\"

      Default User name based on OS

      • all Ubuntu images: ubuntu

      • all AlmaLinux images: almalinux

      • all Rocky Linux images: rocky

      • all Fedora images: fedora

      • all Debian images: debian

      • all RHEL images: cloud-user

      If you still have VMs running with deleted CentOS images, you need to use the following default username for your CentOS images: centos.

\"Password\": \"<Leave blank as you are using SSH key>\"

      • Change Authentication Options

      Before saving, click the \"Advanced\" button. In the \"Advanced Site Settings\", under \"SSH >> Authentication\" settings, check \"Allow agent forwarding\" and select the private key file with .ppk extension from the file picker.

      Helpful Tip

      You can save your above configured site with some preferred name by clicking the \"Save\" button and then giving a proper name to your site. This prevents needing to manually enter all of your configuration again the next time you need to use WinSCP.

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#using-winscp","title":"Using WinSCP","text":"

      You can follow the above steps to manually add a new site the next time you open WinSCP, or you can connect to your previously saved site. Saved sites will be listed in the popup dialog and can be selected by clicking on the site name.

      Then click the \"Login\" button to connect to your NERC project's VM as shown below:

      You should now be connected to the VM's remote directories/files. You can drag and drop your files to/from file windows to begin transfer. When you're finished, click the \"X\" icon in the top right to disconnect.

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#ii-cyberduck","title":"ii. Cyberduck","text":"

      Cyberduck is a libre server and cloud storage browser for Mac and Windows. Its user-friendly interface enables seamless connections to servers, enterprise file sharing, and various cloud storage platforms.

      Prerequisites:

      • Cyberduck installed, see Download and Install the latest version of the Cyberduck for more information.
      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#configuring-cyberduck","title":"Configuring Cyberduck","text":"
      • Click on the \"Open Connection\" button as shown below:
      • Select either \"SFTP\" or \"FTP\" from the dropdown options as shown below:
      • Provide the following required information:

      \"Server\": \"<Your Floating IP of VM>\"

      \"Port\": \"22\"

      \"User name\": \"<Default User name based on OS>\"

      Default User name based on OS

      • all Ubuntu images: ubuntu

      • all AlmaLinux images: almalinux

      • all Rocky Linux images: rocky

      • all Fedora images: fedora

      • all Debian images: debian

      • all RHEL images: cloud-user

\"Password\": \"<Leave blank as you are using SSH key>\"

      \"SSH Private Key\": \"Choose the appropriate SSH Private Key from your local machine that has the corresponding public key attached to your VM\"

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#using-cyberduck","title":"Using Cyberduck","text":"

      Then click the \"Connect\" button to connect to your NERC VM as shown below:

      You should now be connected to the VM's remote directories/files. You can drag and drop your files to/from file windows to begin transfer. When you're finished, click the \"X\" icon in the top right to disconnect.

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#iii-filezilla","title":"iii. Filezilla","text":"

Filezilla is a free and open source SFTP client which is built on modern standards. It is available cross-platform (Mac, Windows and Linux) and is actively maintained. You can transfer files to/from your NERC VM from your computer or any resources connected to your computer (shared drives, Dropbox, etc.)

      Prerequisites:

      • Filezilla installed, see Download and Install the latest version of the Filezilla for more information.
      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#configuring-filezilla","title":"Configuring Filezilla","text":"
      • Click on \"Site Manager\" icon as shown below:
      • Click on \"New Site\" as shown below:
      • Select either \"SFTP\" or \"FTP\" from the dropdown options as shown below:
      • Provide the following required information:

      \"Server\": \"<Your Floating IP of VM>\"

      \"Port\": \"22\"

      \"Logon Type\": \"Key file\" from the dropdown option

      \"User\": \"<Default User name based on OS>\"

      Default User name based on OS

      • all Ubuntu images: ubuntu

      • all AlmaLinux images: almalinux

      • all Rocky Linux images: rocky

      • all Fedora images: fedora

      • all Debian images: debian

      • all RHEL images: cloud-user

      If you still have VMs running with deleted CentOS images, you need to use the following default username for your CentOS images: centos.

\"Key file\": \"Browse and choose the appropriate SSH Private Key from your local machine that has the corresponding Public Key attached to your VM\"

      "},{"location":"openstack/data-transfer/data-transfer-from-to-vm/#using-filezilla","title":"Using Filezilla","text":"

      Then click \"Connect\" button to connect to your NERC VM as shown below:

      You should now be connected to the VM and see your local files in the left-hand pane and the remote files in the right-hand pane. You can drag and drop between them or drag and drop to/from file windows on your computer. When you're finished, click the \"X\" icon in the top right to disconnect.

      "},{"location":"openstack/decommission/decommission-openstack-resources/","title":"Decommission Your NERC OpenStack Resources","text":"

      You can decommission all of your NERC OpenStack resources sequentially as outlined below.

      "},{"location":"openstack/decommission/decommission-openstack-resources/#prerequisite","title":"Prerequisite","text":"
• Backup: Back up any critical data or configurations stored on the resources that are going to be decommissioned. This ensures that important information is not lost during the process. You can refer to this guide to initiate and carry out data transfer to and from the virtual machine.

      • Shutdown Instances: If applicable, Shut Off any running instances to ensure they are not actively processing data during decommissioning.

      • Setup OpenStack CLI, see OpenStack Command Line setup for more information.

      "},{"location":"openstack/decommission/decommission-openstack-resources/#delete-all-vms","title":"Delete all VMs","text":"

      For instructions on deleting instance(s), please refer to this documentation.
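
As a quick sanity check once the instances are removed, a sketch using the openstack client (assuming the CLI setup from the prerequisites) to confirm that nothing is left:

openstack server list\n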

      "},{"location":"openstack/decommission/decommission-openstack-resources/#delete-volumes-and-snapshots","title":"Delete volumes and snapshots","text":"

      For instructions on deleting volume(s), please refer to this documentation.

To delete snapshot(s), provided the snapshot is not used by any running instance:

      Navigate to Project -> Volumes -> Snapshots.

      Unable to Delete Snapshots

You must first delete all volumes and instances (and their attached volumes) that were created using the snapshot; otherwise, you will not be able to delete the volume snapshots.
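
A short sketch using the openstack client (assuming the CLI setup from the prerequisites) to confirm that no volumes or snapshots remain:

openstack volume list\n\nopenstack volume snapshot list\n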

      "},{"location":"openstack/decommission/decommission-openstack-resources/#delete-all-custom-built-images-and-instance-snapshot-built-images","title":"Delete all custom built Images and Instance Snapshot built Images","text":"

      Navigate to Project -> Compute -> Images.

Select all of the custom built images that have Visibility set as \"Private\" to delete them.

      "},{"location":"openstack/decommission/decommission-openstack-resources/#delete-your-all-private-networks-routers-and-internal-interfaces-on-the-routers","title":"Delete your all private Networks, Routers and Internal Interfaces on the Routers","text":"

To review all Networks and their connectivity, you need to:

      Navigate to Project -> Network -> Network Topology.

This will show a view of all current Networks in your project in Graph or Topology view. Make sure no instances are connected to your private network, which was set up by following this documentation. If there are any such instances, refer to this to delete those VMs.

First, delete all other Routers used to create private networks (set up by following this documentation), except default_router, from:

      Navigate to Project -> Network -> Routers.

After the Routers are deleted, delete all other Networks except default_network and provider from:

      Navigate to Project -> Network -> Networks.

      Unable to Delete Networks

You must first delete all instances and then delete all routers; only then will you be able to delete the associated private networks.

      "},{"location":"openstack/decommission/decommission-openstack-resources/#release-all-floating-ips","title":"Release all Floating IPs","text":"

      Navigate to Project -> Network -> Floating IPs.

      For instructions on releasing your allocated Floating IP back into the NERC floating IP pool, please refer to this documentation.
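
Alternatively, a sketch using the openstack client to list and release a Floating IP (the value shown is a placeholder for your allocated Floating IP address or its ID):

openstack floating ip list\n\nopenstack floating ip delete <FLOATING_IP_ADDRESS_OR_ID>\n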

      "},{"location":"openstack/decommission/decommission-openstack-resources/#clean-up-all-added-security-groups","title":"Clean up all added Security Groups","text":"

First, delete all other security groups except default, and also make sure the default security group does not have any extra rules. To view all Security Groups:

      Navigate to Project -> Network -> Security Groups.

      Unable to Delete Security Groups

You must first delete all instances; only then will you be able to delete the security groups. If a security group is attached to a VM, you will not be allowed to delete that security group.
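
A sketch using the openstack client, reusing the ssh_only group name from the earlier examples as a placeholder:

openstack security group list\n\nopenstack security group delete ssh_only\n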

      "},{"location":"openstack/decommission/decommission-openstack-resources/#delete-all-of-your-stored-key-pairs","title":"Delete all of your stored Key Pairs","text":"

      Navigate to Project -> Compute -> Key Pairs.

      Unable to Delete Key Pairs

You must first delete all instances that are using the selected Key Pairs; only then will you be able to delete them.
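
A sketch using the openstack client, with cloud_key (used elsewhere in this documentation) standing in for your own key pair name:

openstack keypair list\n\nopenstack keypair delete cloud_key\n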

      "},{"location":"openstack/decommission/decommission-openstack-resources/#delete-all-buckets-and-objects","title":"Delete all buckets and objects","text":"

      For instructions on deleting bucket(s) along with all objects, please refer to this documentation.


      Navigate to Project -> Object Store -> Containers.

      Unable to Delete Container with Objects inside

You must first delete all objects inside a Container; only then will you be able to delete the container. Please make sure any critical object data has already been remotely backed up before deleting it. You can also use the openstack client to recursively delete containers which have multi-level objects inside, as described here, so you don't need to manually delete all objects inside a container prior to deleting the container. This will save a lot of your time and effort.
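
For reference, a sketch of the recursive deletion mentioned above (the container name is a placeholder; please double-check the exact flags against the linked documentation for your client version):

openstack container delete --recursive <CONTAINER_NAME>\n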

      "},{"location":"openstack/decommission/decommission-openstack-resources/#use-coldfront-to-reduce-the-storage-quota-to-zero","title":"Use ColdFront to reduce the Storage Quota to Zero","text":"

      Each allocation, whether requested or approved, will be billed based on the pay-as-you-go model. The exception is for Storage quotas, where the cost is determined by your requested and approved allocation values to reserve storage from the total NESE storage pool. For NERC (OpenStack) Resource Allocations, storage quotas are specified by the \"OpenStack Volume Quota (GiB)\" and \"OpenStack Swift Quota (GiB)\" allocation attributes.

Even if you have deleted all volumes, snapshots, and object storage buckets and objects in your OpenStack project, it is essential to adjust the approved values for your NERC (OpenStack) resource allocations to zero (0); otherwise, you will still be incurring a charge for the approved storage as explained in Billing FAQs.

      To achieve this, you must submit a final change request to reduce the Storage Quotas for the \"OpenStack Volume Quota (GiB)\" and \"OpenStack Swift Quota (GiB)\" to zero (0) for your NERC (OpenStack) resource type. You can review and manage these resource allocations by visiting the resource allocations. Here, you can filter the allocation of your interest and then proceed to request a change request.

      Please make sure your change request looks like this:

      Wait until the requested resource allocation gets approved by the NERC's admin.

      After approval, kindly review and verify that the quotas are accurately reflected in your resource allocation and OpenStack project. Please ensure that the approved quota values are accurately displayed as explained here.

      "},{"location":"openstack/decommission/decommission-openstack-resources/#review-your-block-storagevolumecinder-quota","title":"Review your Block Storage(Volume/Cinder) Quota","text":"

      Please confirm and verify that the gigabytes resource value that specifies total space in external volumes is set to a limit of zero (0) in correspondence with the approved \"OpenStack Volume Quota (GiB)\" of your allocation when running openstack quota show openstack client command as shown below:

      openstack quota show\n+-----------------------+--------+\n| Resource              |  Limit |\n+-----------------------+--------+\n...\n| gigabytes             |      0 |\n...\n+-----------------------+--------+\n
      "},{"location":"openstack/decommission/decommission-openstack-resources/#review-your-object-storageswift-quota","title":"Review your Object Storage(Swift) Quota","text":"

To check the overall space used and the configured quota, you can use the openstack object store account show openstack client command. Please confirm and verify that the Quota-Bytes property value is set to one (1), in correspondence with the approved zero (0) \"OpenStack Swift Quota (GiB)\" of your allocation, and also check that the overall space used in Bytes is zero (0) along with no Containers and Objects, as shown below:

      openstack object store account show\n+------------+---------------------------------------+\n| Field      | Value                                 |\n+------------+---------------------------------------+\n| Account    | AUTH_5e1cbcfe729a4c7e8fb2fd5328456eea |\n| Bytes      | 0                                     |\n| Containers | 0                                     |\n| Objects    | 0                                     |\n| properties | Quota-Bytes='1'                       |\n+------------+---------------------------------------+\n
      "},{"location":"openstack/decommission/decommission-openstack-resources/#review-your-project-usage","title":"Review your Project Usage","text":"

      Several commands are available to access project-level resource utilization details. The openstack limits show --absolute command offers a comprehensive view of the most crucial resources and also allows you to view your current resource consumption.


      Very Important: Ensure No Resources that will be Billed are Used

      Most importantly, ensure that there is no active usage for any of your currently allocated project resources.

      Please ensure the output appears as follows, with all used resources having a value of zero (0), except for totalSecurityGroupsUsed.

      openstack limits show --absolute\n+--------------------------+-------+\n| Name                     | Value |\n+--------------------------+-------+\n...\n| totalRAMUsed             |     0 |\n| totalCoresUsed           |     0 |\n| totalInstancesUsed       |     0 |\n| totalFloatingIpsUsed     |     0 |\n| totalSecurityGroupsUsed  |     1 |\n| totalServerGroupsUsed    |     0 |\n...\n| totalVolumesUsed         |     0 |\n| totalGigabytesUsed       |     0 |\n| totalSnapshotsUsed       |     0 |\n| totalBackupsUsed         |     0 |\n| totalBackupGigabytesUsed |     0 |\n+--------------------------+-------+\n
      "},{"location":"openstack/decommission/decommission-openstack-resources/#review-your-projects-resource-quota-from-the-openstack-dashboard","title":"Review your Project's Resource Quota from the OpenStack Dashboard","text":"

      After removing all OpenStack resources and updating the Storage Quotas to set them to zero (0), you can review and verify that these changes are reflected in your Horizon Dashboard Overview.

      Navigate to Project -> Compute -> Overview.

      "},{"location":"openstack/decommission/decommission-openstack-resources/#finally-archive-your-coldfront-project","title":"Finally, Archive your ColdFront Project","text":"

As a PI, you will now be able to Archive your ColdFront Project by accessing NERC's ColdFront interface. Please refer to these instructions on how to archive your projects that need to be decommissioned.

      "},{"location":"openstack/logging-in/access-the-openstack-dashboard/","title":"Access the OpenStack Dashboard","text":"

      The OpenStack Dashboard which is a web-based graphical interface, code named Horizon, is located at https://stack.nerc.mghpcc.org.

The NERC Authentication supports CILogon using Keycloak for gateway authentication and authorization, which provides federated login via your institution's accounts and is the recommended authentication method.

      Make sure you are selecting \"OpenID Connect\" (which is selected by default) as shown here:

      Next, you will be redirected to CILogon welcome page as shown below:

      MGHPCC Shared Services (MSS) Keycloak will request approval of access to the following information from the user:

      • Your CILogon user identifier

      • Your name

      • Your email address

      • Your username and affiliation from your identity provider

which are required in order to allow access to your account on NERC's OpenStack dashboard.

From the \"Selected Identity Provider\" dropdown option, please select your institution's name. If you would like to remember your selected institution name for future logins, please check the \"Remember this selection\" checkbox; this will bypass the CILogon welcome page on subsequent visits and proceed directly to the selected institution's identity provider (IdP). Click \"Log On\". This will redirect you to your respective institutional login page where you need to enter your institutional credentials.

      Important Note

The NERC does not see or have access to your institutional account credentials; it points to your selected institution's identity provider and redirects back once you are authenticated.

Once you successfully authenticate, you should see an overview of resources like Compute (instances, VCPUs, RAM, etc.), Volume, and Network. You can also see a usage summary for a provided date range.

      I can't find my virtual machine

If you are a member of several projects, i.e. ColdFront NERC (OpenStack) allocations, you may need to switch the project before you can see and use the OpenStack resources you or your team has created. Clicking on the project dropdown, which is displayed near the top right side, will pop up the list of projects you are in. You can select the new project by hovering and clicking on the project name in that list as shown below:

      "},{"location":"openstack/logging-in/dashboard-overview/","title":"Dashboard Overview","text":"

      When you are logged-in, you will be redirected to the Compute panel which is under the Project tab. In the top bar, you can see the two small tabs: \"Project\" and \"Identity\".

      Beneath that you can see six panels in larger print: \"Project\", \"Compute\", \"Volumes\", \"Network\", \"Orchestration\", and \"Object Store\".

      "},{"location":"openstack/logging-in/dashboard-overview/#project-panel","title":"Project Panel","text":"

      Navigate: Project -> Project

      • API Access: View API endpoints.

      "},{"location":"openstack/logging-in/dashboard-overview/#compute-panel","title":"Compute Panel","text":"

      Navigate: Project -> Compute

      • Overview: View reports for the project.

      • Instances: View, launch, create a snapshot from, stop, pause, or reboot instances, or connect to them through VNC.

      • Images: View images and instance snapshots created by project users, plus any images that are publicly available. Create, edit, and delete images, and launch instances from images and snapshots.

      • Key Pairs: View, create, edit, import, and delete key pairs.

      • Server Groups: View, create, edit, and delete server groups.

      "},{"location":"openstack/logging-in/dashboard-overview/#volume-panel","title":"Volume Panel","text":"

      Navigate: Project -> Volume

• Volumes: View, create, edit, delete volumes, and accept volume transfer.

      • Backups: View, create, edit, and delete backups.

      • Snapshots: View, create, edit, and delete volume snapshots.

      • Groups: View, create, edit, and delete groups.

      • Group Snapshots: View, create, edit, and delete group snapshots.

      "},{"location":"openstack/logging-in/dashboard-overview/#network-panel","title":"Network Panel","text":"

      Navigate: Project -> Network

      • Network Topology: View the network topology.

      • Networks: Create and manage public and private networks.

      • Routers: Create and manage routers.

• Security Groups: View, create, edit, and delete security groups and security group rules.

      • Load Balancers: View, create, edit, and delete load balancers.

      • Floating IPs: Allocate an IP address to or release it from a project.

• Trunks: View, create, edit, and delete trunks.

      "},{"location":"openstack/logging-in/dashboard-overview/#orchestration-panel","title":"Orchestration Panel","text":"

Navigate: Project -> Orchestration

      • Stacks: Use the REST API to orchestrate multiple composite cloud applications.

• Resource Types: View various resource types and their details.

• Template Versions: View different Heat templates.

• Template Generator: GUI to generate and save templates using drag-and-drop resources.

      "},{"location":"openstack/logging-in/dashboard-overview/#object-store-panel","title":"Object Store Panel","text":"

Navigate: Project -> Object Store

• Containers: Create and manage containers and objects. In the future, you would use this tab to create Swift object storage for your projects on an as-needed basis.

      "},{"location":"openstack/management/vm-management/","title":"VM Management","text":"

RedHat OpenStack offers numerous functionalities for handling virtual machines, and comprehensive information can be found in the official OpenStack site user guide. Please keep in mind that certain features may not be fully implemented on NERC OpenStack.

      "},{"location":"openstack/management/vm-management/#instance-management-actions","title":"Instance Management Actions","text":"

      After launching an instance (On the left side bar, click on Project -> Compute -> Instances), several options are available under the Actions menu located on the right hand side of your screen as shown here:

      "},{"location":"openstack/management/vm-management/#renaming-vm","title":"Renaming VM","text":"

Once a VM is created, its name is set based on the user-specified Instance Name when launching an instance using the Horizon dashboard, or as specified in the openstack server create ... command when using the openstack client.

      To rename a VM, navigate to Project -> Compute -> Instances.

      Select an instance.

      In the menu list in the actions column, select \"Edit Instance\" by clicking on the arrow next to \"Create Snapshot\" as shown below:

Then edit the Name and, optionally, the Description in the \"Information\" tab and save it:
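
If you prefer the command line, a sketch for renaming with the openstack client (both names below are placeholders):

openstack server set --name <NEW_NAME> <INSTANCE_NAME_OR_ID>\n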

      "},{"location":"openstack/management/vm-management/#stopping-and-starting","title":"Stopping and Starting","text":"

      Virtual machines can be stopped and initiated using various methods, and these actions are executed through the openstack command with the relevant parameters.

      1. Reboot is equivalent to powering down the machine and then restarting it. A complete boot sequence takes place and thus the machine returns to use in a few minutes.

        Soft Reboot:

        • A soft reboot attempts a graceful shut down and restart of the instance. It sends an ACPI Restart request to the VM. Similar to sending a reboot command to a physical computer.

        • Click Action -> Soft Reboot Instance.

        • Status will change to Reboot.

        Hard Reboot:

        • A hard reboot power cycles the instance. This forcibly restart your VM. Similar to cycling the power on a physical computer.

        • Click Action -> Hard Reboot Instance.

        • Status will change to Hard Reboot.

      2. The Pause & Resume feature enables the temporary suspension of the VM. While in this state, the VM is retained in memory but doesn't receive any allocated CPU time. This proves handy when conducting interventions on a group of servers, preventing the VM from processing during the intervention.

        • Click Action -> Pause Instance.

        • Status will change to Paused.

        • The Resume operation typically completes in less than a second by clicking Action -> Resume Instance.

      3. The Suspend & Resume function saves the VM onto disk and swiftly restores it (in less than a minute). This process is quicker than the stop/start method, and the VM resumes from where it was suspended, avoiding a new boot cycle.

        • Click Action -> Suspend Instance.

        • Status will change to Suspended.

        • The Resume operation typically completes in less than a second by clicking Action -> Resume Instance.

      4. Shelve & Unshelve

        • Click Action -> Shelve Instance.

• When shelved, it stops all computing and stores a snapshot of the instance. The shelved instances are already imaged as part of the shelving process and appear in Project -> Compute -> Images as \"_shelved\".

        • We strongly recommend detaching volumes before shelving.

        • Status will change to Shelved Offloaded.

        • To unshelve the instance, click Action -> Unshelve Instance.

5. Shut Off & Start Instance

          • Click Action -> Shut Off Instance.

          • When shut off it stops active computing, consuming fewer resources than a Suspend.

          • Status will change to Shutoff.

          • To start the shut down VM, click Action -> Start Instance.

"},{"location":"openstack/management/vm-management/#using-openstack-client-commands","title":"Using openstack client commands","text":"

          The above mentioned actions can all be performed running the openstack client commands with the following syntax:

          openstack server <operation> <INSTANCE_NAME_OR_ID>\n

          such as,

openstack server stop my-vm\n\nopenstack server reboot my-vm\n

          Pro Tip

          If your instance name <INSTANCE_NAME_OR_ID> includes spaces, you need to enclose the name of your instance in quotes, i.e. \"<INSTANCE_NAME_OR_ID>\"

For example: openstack server reboot \"My Test Instance\".
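
For reference, a few of the other actions described above follow the same pattern (a sketch, with my-vm as a placeholder instance name):

openstack server pause my-vm\n\nopenstack server unpause my-vm\n\nopenstack server suspend my-vm\n\nopenstack server resume my-vm\n\nopenstack server shelve my-vm\n\nopenstack server unshelve my-vm\n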

          "},{"location":"openstack/management/vm-management/#create-snapshot","title":"Create Snapshot","text":"
          • Click Action -> Create Snapshot.

• Instances must have status Active, Suspended, or Shutoff to create a snapshot.

          • This creates an image template from a VM instance also known as \"Instance Snapshot\" as described here.

          • The menu will automatically shift to Project -> Compute -> Images once the image is created.

• The sole distinction between an image directly uploaded to the image data service (Glance) and an image generated through a snapshot is that the snapshot-created image possesses additional properties in the Glance database and defaults to being private.

          Glance Image Service

Glance is a central image repository which provides discovering, registering, and retrieving of disk and server images. More about this service can be found here.

          "},{"location":"openstack/management/vm-management/#rescue-a-vm","title":"Rescue a VM","text":"

          There are instances where a virtual machine may encounter boot failure due to reasons like misconfiguration or issues with the system disk. To diagnose and address the problem, the virtual machine console offers valuable diagnostic information on the underlying cause.

          Alternatively, utilizing OpenStack's rescue functions involves booting the virtual machine using the original image, with the system disk provided as a secondary disk. This allows manipulation of the disk, such as using fsck to address filesystem issues or mounting and editing the configuration.

          Important Note

We cannot rescue a volume-backed instance; that means ONLY instances running using an Ephemeral disk can be rescued. Also, this procedure has not been tested for Windows virtual machines.

          VMs can be rescued using either the OpenStack dashboard by clicking Action -> Rescue Instance or via the openstack client using openstack server rescue ... command.

To rescue the virtual machine using the openstack client, the following command can be run:

          openstack server rescue <INSTANCE_NAME_OR_ID>\n

          or, using Horizon dashboard:

          Navigate to Project -> Compute -> Instances.

          Select an instance.

          Click Action -> Rescue Instance.

          When to use Rescue Instance

The rescue mode is only for emergency purposes, for example in case of a system or access failure. This will shut down your instance and mount the root disk to a temporary server. Then, you will be able to connect to this server, repair the system configuration, or recover your data. You may optionally select an image and set a password on the rescue instance server.

          "},{"location":"openstack/management/vm-management/#troubleshoot-the-disk","title":"Troubleshoot the disk","text":"

          This will reboot the virtual machine and you can then log in using the key pair previously defined. You will see two disks, /dev/vda which is the new system disk and /dev/vdb which is the old one to be repaired.

          ubuntu@my-vm:~$ lsblk\nNAME    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINTS\nloop0     7:0    0   62M  1 loop /snap/core20/1587\nloop1     7:1    0 79.9M  1 loop /snap/lxd/22923\nloop2     7:2    0   47M  1 loop /snap/snapd/16292\nvda     252:0    0  2.2G  0 disk\n\u251c\u2500vda1  252:1    0  2.1G  0 part /\n\u251c\u2500vda14 252:14   0    4M  0 part\n\u2514\u2500vda15 252:15   0  106M  0 part /boot/efi\nvdb     252:16   0   20G  0 disk\n\u251c\u2500vdb1  252:17   0 19.9G  0 part\n\u251c\u2500vdb14 252:30   0    4M  0 part\n\u2514\u2500vdb15 252:31   0  106M  0 part\n

          The old one can be mounted and configuration files edited or fsck'd.

          # lsblk\n# cat /proc/diskstats\n# mkdir /tmp/rescue\n# mount /dev/vdb1 /tmp/rescue\n
          "},{"location":"openstack/management/vm-management/#unrescue-the-vm","title":"Unrescue the VM","text":"

On completion, the VM can be returned to the active state with the openstack server unrescue ... openstack client command, and then rebooted.

          openstack server unrescue <INSTANCE_NAME_OR_ID>\n

          Then the secondary disk is removed as shown below:

          ubuntu@my-vm:~$ lsblk\nNAME    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINTS\nloop0     7:0    0   47M  1 loop /snap/snapd/16292\nvda     252:0    0   20G  0 disk\n\u251c\u2500vda1  252:1    0 19.9G  0 part /\n\u251c\u2500vda14 252:14   0    4M  0 part\n\u2514\u2500vda15 252:15   0  106M  0 part /boot/efi\n

          Alternatively, using Horizon dashboard:

          Navigate to Project -> Compute -> Instances.

          Select an instance.

          Click Action -> Unrescue Instance.

          And then Action -> Soft Reboot Instance.

          "},{"location":"openstack/management/vm-management/#delete-instance","title":"Delete Instance","text":"

          VMs can be deleted using either the OpenStack dashboard by clicking Action -> Delete Instance or via the openstack client openstack server delete command.

          How can I delete multiple instances at once?

          Using the Horizon dashboard, navigate to Project -> Compute -> Instances. In the Instances panel, you should see a list of all instances running in your project. Select the instances you want to delete by ticking the checkboxes next to their names. Then, click on \"Delete Instances\" button located on the top right side, as shown below:

          Important Note

          This will immediately terminate the instance, delete all contents of the virtual machine and erase the disk. This operation is not recoverable.

          There are other options available if you wish to keep the virtual machine for future usage. These do, however, continue to use quota for the project even though the VM is not running.

          • Snapshot the VM to keep an offline copy of the virtual machine that can be performed as described here.

          If however, the virtual machine is no longer required and no data on the associated system or ephemeral disk needs to be preserved, the following command can be run:

          openstack server delete <INSTANCE_NAME_OR_ID>\n

          or, using Horizon dashboard:

          Navigate to Project -> Compute -> Instances.

          Select an instance.

          Click Action -> Delete Instance.

          Important Note: Unmount volumes first

          Ensure to unmount any volumes attached to your instance before initiating the deletion process, as failure to do so may lead to data corruption in both your data and the associated volume.

          • If the instance is using Ephemeral disk: It stops and removes the instance along with the ephemeral disk. All data will be permanently lost!

• If the instance is using Volume-backed disk: It stops and removes the instance. If \"Delete Volume on Instance Delete\" was explicitly set to Yes, all data will be permanently lost! If set to No (which is selected by default while launching an instance), the volume may be used to boot a new instance, though any data stored in memory will be permanently lost. For more in-depth information on making your VM setup and data persistent, you can explore the details here.

          • Status will briefly change to Deleting while the instance is being removed.

          The quota associated with this virtual machine will be returned to the project and you can review and verify that looking at your OpenStack dashboard overview.

          • Navigate to Project -> Compute -> Overview.
          "},{"location":"openstack/openstack-cli/launch-a-VM-using-openstack-CLI/","title":"Launch a VM using OpenStack CLI","text":"

First, find the following details using the openstack command; we will require these details during the creation of the virtual machine.

          • Flavor

          • Image

          • Network

          • Security Group

          • Key Name

          Get the flavor list using below openstack command:

          openstack flavor list\n+--------------------------------------+-------------+--------+------+-----------+-------+-----------+\n| ID                                   | Name        |    RAM | Disk | Ephemeral | VCPUs | Is Public |\n+--------------------------------------+-------------+--------+------+-----------+-------+-----------+\n| 12ded228-1a7f-4d35-b994-7dd394a6ca90 |gpu-su-a100.2| 196608 |   20 |         0 |    24 | True      |\n| 15581358-3e81-4cf2-a5b8-c0fd2ad771b4 | mem-su.8    |  65536 |   20 |         0 |     8 | True      |\n| 17521416-0ecf-4d85-8d4c-ec6fd1bc5f9d | cpu-su.1    |   2048 |   20 |         0 |     1 | True      |\n| 2b1dbea2-736d-4b85-b466-4410bba35f1e | cpu-su.8    |  16384 |   20 |         0 |     8 | True      |\n| 2f33578f-c3df-4210-b369-84a998d77dac | mem-su.4    |  32768 |   20 |         0 |     4 | True      |\n| 4498bfdb-5342-4e51-aa20-9ee74e522d59 | mem-su.1    |   8192 |   20 |         0 |     1 | True      |\n| 7f2f5f4e-684b-4c24-bfc6-3fce9cf1f446 | mem-su.16   | 131072 |   20 |         0 |    16 | True      |\n| 8c05db2f-6696-446b-9319-c32341a09c41 | cpu-su.16   |  32768 |   20 |         0 |    16 | True      |\n| 9662b5b2-aeaa-4d56-9bd3-450deee668af | cpu-su.4    |   8192 |   20 |         0 |     4 | True      |\n| b3377fdd-fd0f-4c88-9b4b-3b5c8ada0732 |gpu-su-a100.1|  98304 |   20 |         0 |    12 | True      |\n| e9125ab0-c8df-4488-a252-029c636cbd0f | mem-su.2    |  16384 |   20 |         0 |     2 | True      |\n| ee6417bd-7cd4-4431-a6ce-d09f0fba3ba9 | cpu-su.2    |   4096 |   20 |         0 |     2 | True      |\n+--------------------------------------+------------+--------+------+-----------+-------+------------+\n

          Get the image name and its ID,

          openstack image list  | grep almalinux-9\n| 263f045e-86c6-4344-b2de-aa475dbfa910 | almalinux-9-x86_64  | active |\n

          Get Private Virtual network details, which will be attached to the VM:

          openstack network list\n+--------------------------------------+-----------------+--------------------------------------+\n| ID                                   | Name            | Subnets                              |\n+--------------------------------------+-----------------+--------------------------------------+\n| 43613b84-e1fb-44a4-b1ea-c530edc49018 | provider        | 1cbbb98d-3b57-4f6d-8053-46045904d910 |\n| 8a91900b-d43c-474d-b913-930283e0bf43 | default_network | e62ce2fd-b11c-44ce-b7cc-4ca943e75a23 |\n+--------------------------------------+-----------------+--------------------------------------+\n

          Find the Security Group:

          openstack security group list\n+--------------------------------------+----------------------------------+-----------------------+----------------------------------+------+\n| ID                                   | Name                             | Description            |Project                          | Tags |\n+--------------------------------------+----------------------------------+-----------------------+----------------------------------+------+\n| 8285530a-34e3-4d96-8e01-a7b309a91f9f | default                          | Default security group |8ae3ae25c3a84c689cd24c48785ca23a | []   |\n| bbb738d0-45fb-4a9a-8bc4-a3eafeb49ba7 | ssh_only                         |                        |8ae3ae25c3a84c689cd24c48785ca23a | []   |\n+--------------------------------------+----------------------------------+-----------------------+----------------------------------+------+\n

          Find the Key pair, in my case you can choose your own,

          openstack keypair list | grep -i cloud_key\n| cloud_key | d5:ab:dc:1f:e5:08:44:7f:a6:21:47:23:85:32:cc:04 | ssh  |\n

          Note

          Above details will be different for you based on your project and env.

          "},{"location":"openstack/openstack-cli/launch-a-VM-using-openstack-CLI/#launch-an-instance-from-an-image","title":"Launch an instance from an Image","text":"

Now that we have all the details, let's create a virtual machine using the \"openstack server create\" command.

          Syntax :

          openstack server create --flavor {Flavor-Name-Or-Flavor-ID } \\\n    --image {Image-Name-Or-Image-ID} \\\n    --nic net-id={Network-ID} \\\n    --user-data USER-DATA-FILE \\\n    --security-group {Security_Group_ID} \\\n    --key-name {Keypair-Name} \\\n    --property KEY=VALUE \\\n    <Instance_Name>\n

          Important Note

          If you boot an instance with an \"Instance_Name\" greater than 63 characters, Compute truncates it automatically when turning it into a hostname to ensure the correct functionality of dnsmasq.

          Optionally, you can provide a key name for access control and a security group for security.

You can also include metadata key and value pairs using --property KEY=VALUE. For example, you can add a description for your server by providing the --property description=\"My Server\" parameter.

          You can pass user data in a local file at instance launch by using the --user-data USER-DATA-FILE parameter. If you do not provide a key pair, you will be unable to access the instance.

You can also place arbitrary local files into the instance file system at creation time by using the --file <dest-filename=source-filename> parameter. You can store up to five files. For example, if you have a special authorized keys file named special_authorized_keysfile that you want to put on the instance rather than using the regular SSH key injection, you can add the --file option as shown in the following example.

          --file /root/.ssh/authorized_keys=special_authorized_keysfile\n

To create a VM on a specific \"Availability Zone and Compute Host\", specify --availability-zone {Availability-Zone-Name}:{Compute-Host} in the above syntax (see the sketch after the example below).

          Example:

          openstack server create --flavor cpu-su.2 \\\n    --image almalinux-8-x86_64 \\\n    --nic net-id=8ee63932-464b-4999-af7e-949190d8fe93 \\\n    --security-group default \\\n    --key-name cloud_key \\\n    --property description=\"My Server\" \\\n    my-vm\n
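
To create the VM on a specific Availability Zone and Compute Host as mentioned above, a hypothetical sketch (the zone and host values are placeholders that must match what is actually available in your project):

openstack server create --flavor cpu-su.2 \\\n    --image almalinux-9-x86_64 \\\n    --nic net-id=<Network-ID> \\\n    --key-name cloud_key \\\n    --availability-zone <Availability-Zone-Name>:<Compute-Host> \\\n    my-vm\n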

NOTE: To get more help on the \"openstack server create\" command, use:

openstack help server create\n

          Detailed syntax:

          openstack server create\n  (--image <image> | --volume <volume>)\n  --flavor <flavor>\n  [--security-group <security-group>]\n  [--key-name <key-name>]\n  [--property <key=value>]\n  [--file <dest-filename=source-filename>]\n  [--user-data <user-data>]\n  [--availability-zone <zone-name>]\n  [--block-device-mapping <dev-name=mapping>]\n  [--nic <net-id=net-uuid,v4-fixed-ip=ip-addr,v6-fixed-ip=ip-addr,port-id=port-uuid,auto,none>]\n  [--network <network>]\n  [--port <port>]\n  [--hint <key=value>]\n  [--config-drive <config-drive-volume>|True]\n  [--min <count>]\n  [--max <count>]\n  [--wait]\n  <server-name>\n

          Note

          Similarly, we can launch a VM using a bootable \"Volume\", as described here.
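
          For reference, a minimal sketch of booting from an existing bootable volume instead of an image (the volume name \"my-boot-volume\" is a placeholder; the other values reuse the example above):

          openstack server create --flavor cpu-su.2 \\\n    --volume my-boot-volume \\\n    --nic net-id=8ee63932-464b-4999-af7e-949190d8fe93 \\\n    --security-group default \\\n    --key-name cloud_key \\\n    my-volume-vm\n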

          Now verify the test vm \"my-vm\" is \"Running\" using the following commands:

          openstack server list | grep my-vm\n

          OR,

          openstack server show my-vm\n
          "},{"location":"openstack/openstack-cli/launch-a-VM-using-openstack-CLI/#check-console-of-virtual-machine","title":"Check console of virtual machine","text":"

          The console for a Linux VM can be displayed using console log.

          openstack console log show --line 20 my-vm\n
          "},{"location":"openstack/openstack-cli/launch-a-VM-using-openstack-CLI/#associating-a-floating-ip-to-vm","title":"Associating a Floating IP to VM","text":"

          To associate a Floating IP with the VM, first get an unused Floating IP using the following command:

          openstack floating ip list | grep None | head -2\n| 071f08ac-cd10-4b89-aee4-856ead8e3ead | 169.144.107.154 | None | None |\n| 1baf4232-9cb7-4a44-8684-c604fa50ff60 | 169.144.107.184 | None | None |\n

          Now associate the first IP with the server using the following command:

          openstack server add floating ip my-vm 169.144.107.154\n

          Use the following command to verify whether the Floating IP is assigned to the VM:

          openstack server list | grep my-vm\n| 056c0937-6222-4f49-8405-235b20d173dd | my-vm | ACTIVE  | ...internal=192.168.15.62, 169.144.107.154 |\n
          "},{"location":"openstack/openstack-cli/launch-a-VM-using-openstack-CLI/#remove-existing-floating-ip-from-the-vm","title":"Remove existing floating ip from the VM","text":"
          openstack server remove floating ip <INSTANCE_NAME_OR_ID> <FLOATING_IP_ADDRESS>\n
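
          For example, reusing the \"my-vm\" instance and the floating IP associated earlier:

          openstack server remove floating ip my-vm 169.144.107.154\n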
          "},{"location":"openstack/openstack-cli/launch-a-VM-using-openstack-CLI/#get-all-available-security-group-in-your-project","title":"Get all available security group in your project","text":"
          openstack security group list\n+--------------------------------------+----------+------------------------+----------------------------------+------+\n| 3ca248ac-56ac-4e5f-a57c-777ed74bbd7c | default  | Default security group | f01df1439b3141f8b76e68a3b58ef74a | []   |\n| 5cdc5f33-78fc-4af8-bf25-60b8d4e5db2a | ssh_only | Enable SSH access.     | f01df1439b3141f8b76e68a3b58ef74a | []   |\n+--------------------------------------+----------+------------------------+----------------------------------+------+\n
          "},{"location":"openstack/openstack-cli/launch-a-VM-using-openstack-CLI/#add-existing-security-group-to-the-vm","title":"Add existing security group to the VM","text":"
          openstack server add security group <INSTANCE_NAME_OR_ID> <SECURITY_GROUP>\n

          Example:

          openstack server add security group my-vm ssh_only\n
          "},{"location":"openstack/openstack-cli/launch-a-VM-using-openstack-CLI/#remove-existing-security-group-from-the-vm","title":"Remove existing security group from the VM","text":"
          openstack server remove security group <INSTANCE_NAME_OR_ID> <SECURITY_GROUP>\n

          Example:

          openstack server remove security group my-vm ssh_only\n

          Alternatively, you can use the openstack port unset command to remove the group from a port:

          openstack port unset --security-group <SECURITY_GROUP> <PORT>\n
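
          For example, you can first look up the port attached to the VM and then remove the group from that port (a sketch reusing the \"my-vm\" and \"ssh_only\" names from above; <PORT_ID> comes from the output of the first command):

          openstack port list --server my-vm\nopenstack port unset --security-group ssh_only <PORT_ID>\n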
          "},{"location":"openstack/openstack-cli/launch-a-VM-using-openstack-CLI/#adding-volume-to-the-vm","title":"Adding volume to the VM","text":"
          openstack server add volume\n  [--device <device>]\n  <INSTANCE_NAME_OR_ID>\n  <VOLUME_NAME_OR_ID>\n
          "},{"location":"openstack/openstack-cli/launch-a-VM-using-openstack-CLI/#remove-existing-volume-from-the-vm","title":"Remove existing volume from the VM","text":"
          openstack server remove volume <INSTANCE_NAME_OR_ID> <volume>\n
          "},{"location":"openstack/openstack-cli/launch-a-VM-using-openstack-CLI/#reboot-a-virtual-machine","title":"Reboot a virtual machine","text":"
          openstack server reboot my-vm\n
          "},{"location":"openstack/openstack-cli/launch-a-VM-using-openstack-CLI/#deleting-virtual-machine-from-command-line","title":"Deleting Virtual Machine from Command Line","text":"
          openstack server delete my-vm\n
          "},{"location":"openstack/openstack-cli/openstack-CLI/","title":"OpenStack CLI","text":""},{"location":"openstack/openstack-cli/openstack-CLI/#references","title":"References","text":"

          OpenStack Command Line Client (CLI) Cheat Sheet

          The OpenStack CLI is designed for interactive use. OpenStackClient (aka OSC) is a command-line client for OpenStack that brings the command set for Compute, Identity, Image, Object Storage and Block Storage APIs together in a single shell with a uniform command structure. OpenStackClient is primarily configured using command line options and environment variables. Most of those settings can also be placed into a configuration file to simplify managing multiple cloud configurations. Most global options have a corresponding environment variable that may also be used to set the value. If both are present, the command-line option takes priority.

          It's also possible to call it from a bash script or similar, but typically it is too slow for heavy scripting use.

          "},{"location":"openstack/openstack-cli/openstack-CLI/#command-line-setup","title":"Command Line setup","text":"

          To use the CLI, you must create an application credential and set the appropriate environment variables.

          You can download the environment file with the credentials from the OpenStack dashboard.

          • Log in to the NERC's OpenStack dashboard, choose the project for which you want to download the OpenStack RC file.

          • Navigate to Identity -> Application Credentials.

          • Click on \"Create Application Credential\" button and provide a Name and Roles for the application credential. All other fields are optional and leaving the \"Secret\" field empty will set it to autogenerate (recommended).

          Important Note

          Please note that an application credential is only valid for a single project, and to access multiple projects you need to create an application credential for each. You can switch projects by clicking on the project name at the top right corner and choosing from the dropdown under \"Project\".

          After clicking \"Create Application Credential\" button, the ID and Secret will be displayed and you will be prompted to Download openrc file or to Download clouds.yaml. Both of these are different methods of configuring the client for CLI access. Please save the file.

          "},{"location":"openstack/openstack-cli/openstack-CLI/#configuration","title":"Configuration","text":"

          The CLI is configured via environment variables and command-line options as listed in Authentication.

          "},{"location":"openstack/openstack-cli/openstack-CLI/#configuration-files","title":"Configuration Files","text":""},{"location":"openstack/openstack-cli/openstack-CLI/#openstack-rc-file","title":"OpenStack RC File","text":"

          Find the file (by default it will be named after the application credential with the suffix -openrc.sh, i.e. app-cred-<Credential_Name>-openrc.sh).

          Source your downloaded OpenStack RC File:

          source app-cred-<Credential_Name>-openrc.sh\n

          Important Note

          When you source the file, environment variables are set for your current shell. The variables enable the openstack client commands to communicate with the OpenStack services that run in the cloud. Sourcing only stores the credentials in environment variables - there is no validation at this stage. You can inspect the downloaded file to retrieve the ID and Secret if necessary and see what other environment variables are set.
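
          For example, a quick way to see which variables were set in your current shell (the exact list depends on the downloaded file):

          env | grep OS_\n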

          "},{"location":"openstack/openstack-cli/openstack-CLI/#cloudsyaml","title":"clouds.yaml","text":"

          clouds.yaml is a configuration file that contains everything needed to connect to one or more clouds. It may contain private information and is generally considered private to a user.
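
          For orientation only, a minimal sketch of what a clouds.yaml entry based on an application credential typically looks like; the cloud name \"nerc\" and all values here are placeholders, and you should use the file downloaded from the dashboard rather than typing one by hand:

          clouds:\n  nerc:\n    auth_type: \"v3applicationcredential\"\n    auth:\n      auth_url: <AUTH_URL_FROM_DOWNLOADED_FILE>\n      application_credential_id: \"<ID>\"\n      application_credential_secret: \"<Secret>\"\n    region_name: \"<REGION>\"\n

          With such an entry in place, you can point the client at it with the --os-cloud option, e.g. openstack --os-cloud nerc server list, or by setting the OS_CLOUD environment variable.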

          For more information on configuring the OpenStackClient with clouds.yaml please see the OpenStack documentation.

          "},{"location":"openstack/openstack-cli/openstack-CLI/#install-the-openstack-command-line-clients","title":"Install the OpenStack command-line clients","text":"

          For more information on configuring the OpenStackClient please see the OpenStack documentation.

          "},{"location":"openstack/openstack-cli/openstack-CLI/#openstack-hello-world","title":"OpenStack Hello World","text":"

          Generally, the OpenStack terminal client offers the following methods:

          • list: Lists information about objects currently in the cloud.

          • show: Displays information about a single object currently in the cloud.

          • create: Creates a new object in the cloud.

          • set: Edits an existing object in the cloud.

          To test that you have everything configured, try out some commands. The following command lists all the images available to your project:

          openstack image list\n+--------------------------------------+---------------------+--------+\n| ID                                   | Name                | Status |\n+--------------------------------------+---------------------+--------+\n| a9b48e65-0cf9-413a-8215-81439cd63966 | MS-Windows-2022     | active |\n| cfecb5d4-599c-4ffd-9baf-9cbe35424f97 | almalinux-8-x86_64  | active |\n| 263f045e-86c6-4344-b2de-aa475dbfa910 | almalinux-9-x86_64  | active |\n| 41fa5991-89d5-45ae-8268-b22224c772b2 | debian-10-x86_64    | active |\n| 99194159-fcd1-4281-b3e1-15956c275692 | fedora-36-x86_64    | active |\n| 74a33f77-fc42-4dd1-a5a2-55fb18fc50cc | rocky-8-x86_64      | active |\n| d7d41e5f-58f4-4ba6-9280-7fef9ac49060 | rocky-9-x86_64      | active |\n| 75a40234-702b-4ab7-9d83-f436b05827c9 | ubuntu-18.04-x86_64 | active |\n| 8c87cf6f-32f9-4a4b-91a5-0d734b7c9770 | ubuntu-20.04-x86_64 | active |\n| da314c41-19bf-486a-b8da-39ca51fd17de | ubuntu-22.04-x86_64 | active |\n+--------------------------------------+---------------------+--------+\n

          If you have launched some instances already, the following command shows a list of your project's instances:

          openstack server list --fit-width\n+--------------------------------------+------------------+--------+----------------------------------------------+--------------------------+--------------+\n| ID                                   | Name             | Status | Networks                                     | Image                    |  Flavor      |\n+--------------------------------------+------------------+--------+----------------------------------------------+--------------------------+--------------+\n| 1c96ba49-a20f-4c88-bbcf-93e2364365f5 |    vm-test       | ACTIVE | default_network=192.168.0.146, 199.94.60.4   | N/A (booted from volume) |  cpu-su.4     |\n| dd0d8053-ab88-4d4f-b5bc-97e7e2fe035a |    gpu-test      | ACTIVE | default_network=192.168.0.146, 199.94.60.4   | N/A (booted from volume) |  gpu-su-a100.1  |\n+--------------------------------------+------------------+--------+----------------------------------------------+--------------------------+--------------+\n

          How to fit the CLI output to your terminal?

          You can use --fit-width at the end of the command to fit the output to your terminal.

          If you don't have any instances, you will get the error list index out of range, which is why we didn't suggest this command for your first test:

          openstack server list\nlist index out of range\n

          If you see this error:

          openstack server list\nThe request you have made requires authentication. (HTTP 401) (Request-ID: req-6a827bf3-d5e8-47f2-984c-b6edeeb2f7fb)\n

          Then your environment variables are likely not configured correctly.

          The most common reason is that you made a typo when entering your password. Try sourcing the OpenStack RC file again and retyping it.

          You can type openstack -h to see a list of available commands.

          Note

          This includes some admin-only commands.

          If you try one of these by mistake, you might see this output:

          openstack user list\nYou are not authorized to perform the requested action: identity:list_users.\n(HTTP 403) (Request-ID: req-cafe1e5c-8a71-44ab-bd21-0e0f25414062)\n

          Depending on your needs for API interaction, this might be sufficient.

          If you just occasionally want to run 1 or 2 of these commands from your terminal, you can do it manually or write a quick bash script that makes use of this CLI.
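
          As a rough illustration, the sketch below calls the client once per server, and each invocation pays the client's full startup cost, which is why this approach does not scale to heavy use (it assumes your environment variables are already set as described above):

          #!/bin/bash\n# Print the status of every server in the current project,\n# one \"openstack\" invocation per server.\nfor id in $(openstack server list -f value -c ID); do\n    echo \"$id: $(openstack server show \"$id\" -f value -c status)\"\ndone\n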

          However, this isn't a very optimized way to do complex interactions with OpenStack. For that, you want to write scripts that interact with the python SDK bindings directly.

          Pro Tip

          If you find yourself fiddling extensively with awk and grep to extract things like project IDs from the CLI output, it's time to move on to using the client libraries or the RESTful API directly in your scripts.
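
          As in the sketch above, the -f value -c <column> output formatters cover many of these cases without any awk or grep; for example, printing just the ID of the \"my-vm\" server used earlier:

          openstack server show my-vm -c id -f value\n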

          "},{"location":"openstack/persistent-storage/attach-the-volume-to-an-instance/","title":"Attach The Volume To An Instance","text":""},{"location":"openstack/persistent-storage/attach-the-volume-to-an-instance/#using-horizon-dashboard","title":"Using Horizon dashboard","text":"

          Once you're logged in to NERC's Horizon dashboard.

          Navigate to Project -> Volumes -> Volumes.

          In the Actions column, click the dropdown and select \"Manage Attachments\".

          From the menu, choose the instance you want to connect the volume to from Attach to Instance, and click \"Attach Volume\".

          The volume now has a status of \"In-use\" and \"Attached To\" column shows which instance it is attached to, and what device name it has.

          This will be something like /dev/vdb but it can vary depending on the state of your instance, and whether you have attached volumes before.

          Make note of the device name of your volume.

          "},{"location":"openstack/persistent-storage/attach-the-volume-to-an-instance/#using-the-cli","title":"Using the CLI","text":"

          Prerequisites:

          To run the OpenStack CLI commands, you need to have:

          • OpenStack CLI setup, see OpenStack Command Line setup for more information.

          To attach the volume to an instance using the CLI, do this:

          "},{"location":"openstack/persistent-storage/attach-the-volume-to-an-instance/#using-the-openstack-client","title":"Using the openstack client","text":"

          When the status is 'available', the volume can be attached to a virtual machine using the following openstack client command syntax:

          openstack server add volume <INSTANCE_NAME_OR_ID> <VOLUME_NAME_OR_ID>\n

          For example:

          openstack server add volume test-vm my-volume\n+-----------------------+--------------------------------------+\n| Field                 | Value                                |\n+-----------------------+--------------------------------------+\n| ID                    | 5b5380bd-a15b-408b-8352-9d4219cf30f3 |\n| Server ID             | 8a876a17-3407-484c-85c4-8a46fbac1607 |\n| Volume ID             | 5b5380bd-a15b-408b-8352-9d4219cf30f3 |\n| Device                | /dev/vdb                             |\n| Tag                   | None                                 |\n| Delete On Termination | False                                |\n+-----------------------+--------------------------------------+\n

          where \"test-vm\" is the virtual machine and the second parameter, \"my-volume\" is the volume created before.

          Pro Tip

          If your instance name <INSTANCE_NAME_OR_ID> and volume name <VOLUME_NAME_OR_ID> include spaces, you need to enclose them in quotes, i.e. \"<INSTANCE_NAME_OR_ID>\" and \"<VOLUME_NAME_OR_ID>\".

          For example: openstack server add volume \"My Test Instance\" \"My Volume\".

          "},{"location":"openstack/persistent-storage/attach-the-volume-to-an-instance/#to-verify-the-volume-is-attached-to-the-vm","title":"To verify the volume is attached to the VM","text":"
          openstack volume list\n+--------------------------------------+-----------------+--------+------+----------------------------------+\n| ID                                   | Name            | Status | Size | Attached to                      |\n+--------------------------------------+-----------------+--------+------+----------------------------------+\n| 563048c5-d27b-4397-bb4e-034e0f4d9fa7 |                 | in-use |   20 | Attached to test-vm on /dev/vda  |\n| 5b5380bd-a15b-408b-8352-9d4219cf30f3 | my-volume       | in-use |   20 | Attached to test-vm on /dev/vdb  |\n+--------------------------------------+-----------------+--------+------+----------------------------------+\n

          The volume now has a status of \"in-use\" and \"Attached To\" column shows which instance it is attached to, and what device name it has.

          This will be something like /dev/vdb but it can vary depending on the state of your instance, and whether you have attached volumes before.

          "},{"location":"openstack/persistent-storage/create-an-empty-volume/","title":"Create An Empty Volume","text":"

          An empty volume is like an unformatted USB stick. We'll attach it to an instance, create a filesystem on it, and mount it to the instance.

          "},{"location":"openstack/persistent-storage/create-an-empty-volume/#using-horizon-dashboard","title":"Using Horizon dashboard","text":"

          Once you're logged in to NERC's Horizon dashboard, you can create a volume via the \"Volumes -> Volumes\" page by clicking on the \"Create Volume\" button.

          Navigate to Project -> Volumes -> Volumes.

          Click \"Create Volume\".

          In the Create Volume dialog box, give your volume a name. The description field is optional.

          Choose \"empty volume\" from the Source dropdown. This will create a volume that is like an unformatted hard disk. Choose a size (in GiB) for your volume. Leave Type and Availability Zone as they are; only admins of NERC OpenStack are able to manage volume types.

          Click \"Create Volume\" button.

          Checking the status of the created volume will show:

          \"downloading\" means that the volume contents are being transferred from the image service to the volume service.

          In a few moments, the newly created volume will appear in the Volumes list with the Status \"available\". \"available\" means the volume can now be used for booting. A set of volume_image metadata is also copied from the image service.

          "},{"location":"openstack/persistent-storage/create-an-empty-volume/#using-the-cli","title":"Using the CLI","text":"

          Prerequisites:

          To run the OpenStack CLI commands, you need to have:

          • OpenStack CLI setup, see OpenStack Command Line setup for more information.

          To create a volume using the CLI, do this:

          "},{"location":"openstack/persistent-storage/create-an-empty-volume/#using-the-openstack-client","title":"Using the openstack client","text":"

          This allows an arbitrary sized disk to be attached to your virtual machine, like plugging in a USB stick. The steps below create a disk of 20 gibibytes (GiB) with name \"my-volume\".

          openstack volume create --size 20 my-volume\n\n+---------------------+--------------------------------------+\n| Field               | Value                                |\n+---------------------+--------------------------------------+\n| attachments         | []                                   |\n| availability_zone   | nova                                 |\n| bootable            | false                                |\n| consistencygroup_id | None                                 |\n| created_at          | 2024-02-03T17:06:05.000000           |\n| description         | None                                 |\n| encrypted           | False                                |\n| id                  | 5b5380bd-a15b-408b-8352-9d4219cf30f3 |\n| multiattach         | False                                |\n| name                | my-volume                            |\n| properties          |                                      |\n| replication_status  | None                                 |\n| size                | 20                                   |\n| snapshot_id         | None                                 |\n| source_volid        | None                                 |\n| status              | creating                             |\n| type                | tripleo                              |\n| updated_at          | None                                 |\n| user_id             | 938eb8bfc72e4ca3ad2b94e2eb4059f7     |\n+---------------------+--------------------------------------+\n
          "},{"location":"openstack/persistent-storage/create-an-empty-volume/#to-view-newly-created-volume","title":"To view newly created volume","text":"
          openstack volume list\n+--------------------------------------+-----------------+-----------+------+----------------------------------+\n| ID                                   | Name            | Status    | Size | Attached to                      |\n+--------------------------------------+-----------------+-----------+------+----------------------------------+\n| 563048c5-d27b-4397-bb4e-034e0f4d9fa7 |                 | in-use    |   20 | Attached to test-vm on /dev/vda  |\n| 5b5380bd-a15b-408b-8352-9d4219cf30f3 | my-volume       | available |   20 |                                  |\n+--------------------------------------+-----------------+-----------+------+----------------------------------+\n
          "},{"location":"openstack/persistent-storage/delete-volumes/","title":"Delete Volumes","text":""},{"location":"openstack/persistent-storage/delete-volumes/#using-horizon-dashboard","title":"Using Horizon dashboard","text":"

          Once you're logged in to NERC's Horizon dashboard.

          Navigate to Project -> Volumes -> Volumes.

          Select the volume or volumes that you want to delete.

          Click \"Delete Volumes\" button.

          In the Confirm Delete Volumes window, click the Delete Volumes button to confirm the action.

          Unable to Delete Volume

          You cannot delete a bootable volume that is actively in use by a running VM. If you really want to delete such a volume, first delete the instance; then you will be allowed to delete the detached volume. Before deleting, please make sure the instance was launched with the default No selected for the \"Delete Volume on Instance Delete\" configuration option. If you had set this configuration to \"Yes\", then deleting the instance will automatically remove the associated volume.

          "},{"location":"openstack/persistent-storage/delete-volumes/#using-the-cli","title":"Using the CLI","text":"

          Prerequisites:

          To run the OpenStack CLI commands, you need to have:

          • OpenStack CLI setup, see OpenStack Command Line setup for more information.

          To delete a volume using the CLI, do this:

          "},{"location":"openstack/persistent-storage/delete-volumes/#using-the-openstack-client","title":"Using the openstack client","text":"

          The following openstack client command syntax can be used to delete a volume:

          openstack volume delete <VOLUME_NAME_OR_ID>\n

          For example:

          openstack volume delete my-volume\n

          Pro Tip

          If your volume name <VOLUME_NAME_OR_ID> includes spaces, you need to enclose it in quotes, i.e. \"<VOLUME_NAME_OR_ID>\".

          For example: openstack volume delete \"My Volume\".

          Your volume will now go into state 'deleting' and completely disappear from the openstack volume list output.
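
          You can confirm this, for example, by checking that it no longer appears in the list (reusing the \"my-volume\" name from above); once deletion completes, the command prints nothing:

          openstack volume list | grep my-volume\n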

          "},{"location":"openstack/persistent-storage/detach-a-volume/","title":"Detach A Volume and Attach it to an instance","text":""},{"location":"openstack/persistent-storage/detach-a-volume/#detach-a-volume","title":"Detach A Volume","text":""},{"location":"openstack/persistent-storage/detach-a-volume/#using-horizon-dashboard","title":"Using Horizon dashboard","text":"

          Once you're logged in to NERC's Horizon dashboard.

          Navigate to Project -> Volumes -> Volumes.

          To detach a mounted volume, go back to \"Manage Attachments\" and choose \"Detach Volume\".

          This will pop up the following interface to proceed:

          Unable to Detach Volume

          If your bootable volume is attached to a VM, that volume cannot be detached because it is the root device volume. This bootable volume is created when you launch an instance from an Image or an Instance Snapshot with the Yes option selected for \"Create New Volume\", which configures the instance to use persistent storage. If you explicitly chose \"No\" for this option, then no attached volume is created for the instance; ephemeral disk storage is used instead.

          "},{"location":"openstack/persistent-storage/detach-a-volume/#using-the-cli","title":"Using the CLI","text":"

          Prerequisites:

          To run the OpenStack CLI commands, you need to have:

          • OpenStack CLI setup, see OpenStack Command Line setup for more information.
          "},{"location":"openstack/persistent-storage/detach-a-volume/#using-the-openstack-client","title":"Using the openstack client","text":"

          The following openstack client command syntax can be used to detach a volume from a VM:

          openstack server remove volume <INSTANCE_NAME_OR_ID> <VOLUME_NAME_OR_ID>\n

          For example:

          openstack server remove volume test-vm my-volume\n

          where \"test-vm\" is the virtual machine and the second parameter, \"my-volume\", is the volume created earlier and attached to the VM; it can be seen in the openstack volume list output.

          Pro Tip

          If your instance name <INSTANCE_NAME_OR_ID> and volume name <VOLUME_NAME_OR_ID> include spaces, you need to enclose them in quotes, i.e. \"<INSTANCE_NAME_OR_ID>\" and \"<VOLUME_NAME_OR_ID>\".

          For example: openstack server remove volume \"My Test Instance\" \"My Volume\".

          Check that the volume is in state 'available' again.
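
          One way to check this, reusing the \"my-volume\" name from the example above:

          openstack volume show my-volume -c status -f value\n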

          If that's the case, the volume is now ready to either be attached to another virtual machine or, if it is not needed any longer, to be completely deleted (please note that this step cannot be reverted!).

          "},{"location":"openstack/persistent-storage/detach-a-volume/#attach-the-detached-volume-to-an-instance","title":"Attach the detached volume to an instance","text":"

          Once it is successfully detached, you can use \"Manage Attachments\" to attach it to another instance if desired as explained here.

          OR,

          You can attach the existing volume (Detached!) to the new instance as shown below:

          After this, run the following commands as the root user to mount it:

          mkdir /mnt/test_volume\nmount /dev/vdb /mnt/test_volume\n

          All the data from the previous instance will be available under the mounted folder at /mnt/test_volume.

          Very Important Note

          Also, a given volume might not get the same device name the second time you attach it to an instance.

          "},{"location":"openstack/persistent-storage/extending-volume/","title":"Extending Volume","text":"

          A volume can be made larger while maintaining the existing contents, assuming the file system supports resizing. We can extend a volume that is not attached to any VM and in \"Available\" status.

          The steps are as follows:

          • Extend the volume to its new size

          • Extend the filesystem to its new size

          "},{"location":"openstack/persistent-storage/extending-volume/#using-horizon-dashboard","title":"Using Horizon dashboard","text":"

          Once you're logged in to NERC's Horizon dashboard.

          Navigate to Project -> Volumes -> Volumes.

          Specify the new extended size in GiB:

          "},{"location":"openstack/persistent-storage/extending-volume/#using-the-cli","title":"Using the CLI","text":"

          Prerequisites:

          To run the OpenStack CLI commands, you need to have:

          • OpenStack CLI setup, see OpenStack Command Line setup for more information.
          "},{"location":"openstack/persistent-storage/extending-volume/#using-the-openstack-client","title":"Using the openstack client","text":"

          The following openstack client command syntax can be used to extend an existing volume from its previous size to a new size:

          openstack volume set --size <NEW_SIZE_IN_GiB> <VOLUME_NAME_OR_ID>\n

          For example:

          openstack volume set --size 100 my-volume\n

          where \"my-volume\" is the existing volume with a size of 80 GiB, which is going to be extended to a new size of 100 GiB.

          Pro Tip

          If your volume name <VOLUME_NAME_OR_ID> includes spaces, you need to enclose them in quotes, i.e. \"<VOLUME_NAME_OR_ID>\".

          For example: openstack volume set --size 100 \"My Volume\".

          For Windows systems, please follow the provider documentation.

          Please note

          • Volumes can be made larger, but not smaller. There is no support for shrinking existing volumes.

          • The procedure given above has been tested with ext4 and XFS filesystems only (see the sketch below for growing these filesystems).
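
          For Linux guests, once the extended volume has been re-attached and the new size is visible in the guest (for example in lsblk), the filesystem itself still needs to be grown. A minimal sketch for the ext4 and XFS cases, assuming the volume is attached as /dev/vdb and, for XFS, mounted at /mnt/test_volume as in the examples elsewhere in this guide:

          # ext4: grow the filesystem on the block device\nsudo resize2fs /dev/vdb\n\n# XFS: grow the filesystem via its mount point while mounted\nsudo xfs_growfs /mnt/test_volume\n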

          "},{"location":"openstack/persistent-storage/format-and-mount-the-volume/","title":"Format And Mount The Volume","text":"

          Prerequisites:

          Before formatting and mounting the volume, you need to have already created a new volume, as referred to here, and attached it to a running VM, as described here.

          "},{"location":"openstack/persistent-storage/format-and-mount-the-volume/#for-linux-based-virtual-machine","title":"For Linux based virtual machine","text":"

          To verify that the newly created volume, \"my-volume\", exists and is attached to a VM, \"test-vm\", run this openstack client command:

          openstack volume list\n+--------------------------------------+-----------------+--------+------+----------------------------------+\n| ID                                   | Name            | Status | Size | Attached to                      |\n+--------------------------------------+-----------------+--------+------+----------------------------------+\n| 563048c5-d27b-4397-bb4e-034e0f4d9fa7 |                 | in-use |   20 | Attached to test-vm on /dev/vda  |\n| 5b5380bd-a15b-408b-8352-9d4219cf30f3 | my-volume       | in-use |   20 | Attached to test-vm on /dev/vdb  |\n+--------------------------------------+-----------------+--------+------+----------------------------------+\n

          The volume has a status of \"in-use\" and \"Attached To\" column shows which instance it is attached to, and what device name it has.

          This will be something like /dev/vdb but it can vary depending on the state of your instance, and whether you have attached volumes before.

          Make note of the device name of your volume.

          SSH into your instance. You should now see the volume as an additional disk in the output of sudo fdisk -l or lsblk or cat /proc/partitions.

          # lsblk\nNAME    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT\n...\nvda     254:0    0   10G  0 disk\n\u251c\u2500vda1  254:1    0  9.9G  0 part /\n\u251c\u2500vda14 254:14   0    4M  0 part\n\u2514\u2500vda15 254:15   0  106M  0 part /boot/efi\nvdb     254:16   0    1G  0 disk\n

          Here, we see the volume as the disk vdb, which matches the /dev/vdb we previously noted in the \"Attached To\" column.

          Create a filesystem on the volume and mount it. In this example, we will create an ext4 filesystem:

          Run the following commands as root user:

          mkfs.ext4 /dev/vdb\nmkdir /mnt/test_volume\nmount /dev/vdb /mnt/test_volume\ndf -H\n

          The volume is now available at the mount point:

          lsblk\nNAME    MAJ:MIN RM  SIZE RO TYPE MOUNTPOINT\n...\nvda     254:0    0   10G  0 disk\n\u251c\u2500vda1  254:1    0  9.9G  0 part /\n\u251c\u2500vda14 254:14   0    4M  0 part\n\u2514\u2500vda15 254:15   0  106M  0 part /boot/efi\nvdb     254:16   0    1G  0 disk /mnt/test_volume\n

          If you place data in the directory /mnt/test_volume, detach the volume, and mount it to another instance, the second instance will have access to the data.

          Important Note

          In this case it's easy to spot because there is only one additional disk attached to the instance, but it's important to keep track of the device name, especially if you have multiple volumes attached.

          "},{"location":"openstack/persistent-storage/format-and-mount-the-volume/#for-windows-virtual-machine","title":"For Windows virtual machine","text":"

          Here, we create an empty volume following the steps outlined in this documentation.

          Please make sure you are creating a volume of size 100 GiB:

          Then attach the newly created volume to a running Windows VM:

          Log in via remote desktop using the Floating IP attached to the Windows VM:

          What is the user login for Windows Server 2022?

          The default username is \"Administrator,\" and the password is the one you set using the user data PowerShell script during the launch as described here.

          Once connected, search for \"Disk Management\" in the Windows search box. This will show all attached disks as Unknown and Offline, as shown here:

          In Disk Management, select and hold (or right-click) the disk you want to initialize, and then select \"Initialize Disk\". If the disk is listed as Offline, first select and hold (or right-click) the disk, and then select \"Online\".

          In the Initialize Disk dialog box, make sure the correct disk is selected, and then choose OK to accept the default partition style. If you need to change the partition style (GPT or MBR), see Compare partition styles - GPT and MBR.

          Format the New Volume:

          • Select and hold (or right-click) the unallocated space of the new disk.

          • Select \"New Simple Volume\" and follow the wizard to create a new partition.

          • Choose the file system (usually NTFS for Windows).

          • Assign a drive letter or mount point.

          Complete Formatting:

          • Complete the wizard to format the new volume.

          • Once formatting is complete, the new volume should be visible in File Explorer as shown below:

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/","title":"Mount The Object Storage To An Instance","text":""},{"location":"openstack/persistent-storage/mount-the-object-storage/#pre-requisite","title":"Pre-requisite","text":"

          We are using the following setup to mount the object storage to a NERC OpenStack VM:

          • 1 Linux machine, ubuntu-22.04-x86_64 or your choice of Ubuntu OS image, cpu-su.2 flavor with 2 vCPU, 8GB RAM, and 20GB storage - also assign a Floating IP to this VM.

          • Setup and enable your S3 API credentials:

          To access the API credentials, you must log in through the OpenStack Dashboard and navigate to \"Projects > API Access\", where you can use \"Download OpenStack RC File\" as well as \"EC2 Credentials\".

          Clicking on \"EC2 Credentials\" will download a zip file that includes an ec2rc.sh file with content similar to that shown below. The important parts are EC2_ACCESS_KEY and EC2_SECRET_KEY; keep them noted.

            #!/bin/bash\n\n  NOVARC=$(readlink -f \"${BASH_SOURCE:-${0}}\" 2>/dev/null) || NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' \"${BASH_SOURCE:-${0}}\")\n  NOVA_KEY_DIR=${NOVARC%/*}\n  export EC2_ACCESS_KEY=...\n  export EC2_SECRET_KEY=...\n  export EC2_URL=https://localhost/notimplemented\n  export EC2_USER_ID=42 # nova does not use user id, but bundling requires it\n  export EC2_PRIVATE_KEY=${NOVA_KEY_DIR}/pk.pem\n  export EC2_CERT=${NOVA_KEY_DIR}/cert.pem\n  export NOVA_CERT=${NOVA_KEY_DIR}/cacert.pem\n  export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set\n\n  alias ec2-bundle-image=\"ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}\"\n  alias ec2-upload-bundle=\"ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}\"\n

          Alternatively, you can obtain your EC2 access keys using the openstack client:

            sudo apt install python3-openstackclient\n\n  openstack ec2 credentials list\n  +------------------+------------------+--------------+-----------+\n  | Access           | Secret           | Project ID   | User ID   |\n  +------------------+------------------+--------------+-----------+\n  | <EC2_ACCESS_KEY> | <EC2_SECRET_KEY> | <Project_ID> | <User_ID> |\n  +------------------+------------------+--------------+-----------+\n

          OR, you can even create a new one by running:

            openstack ec2 credentials create\n
          • Source the downloaded OpenStack RC File from Projects > API Access by using: source *-openrc.sh command. Sourcing the RC File will set the required environment variables.

          • Enable the Allow Other User option by editing the fuse config file /etc/fuse.conf and uncommenting the \"user_allow_other\" option.

            sudo nano /etc/fuse.conf\n

          The output is going to look like this:
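
          After uncommenting and saving, you can verify the change with a quick grep; the option should appear as an active (non-commented) line:

          grep user_allow_other /etc/fuse.conf\n\nuser_allow_other\n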

          A comparative analysis of Mountpoint for S3, Goofys, and S3FS.

          When choosing between S3 clients that enable the utilization of an object store with applications expecting files, it's essential to consider the specific use case and whether the convenience and compatibility provided by FUSE clients match the project's requirements.

          To delve into a comparative analysis of Mountpoint for S3, Goofys, and S3FS, please read this blog post.

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#1-using-mountpoint-for-amazon-s3","title":"1. Using Mountpoint for Amazon S3","text":"

          Mountpoint for Amazon S3 is a high-throughput open-source file client designed to mount an Amazon S3 bucket as a local file system. Mountpoint is optimized for workloads that need high-throughput read and write access to data stored in S3 Object Storage through a file system interface.

          Very Important Note

          Mountpoint for Amazon S3 intentionally does not implement the full POSIX standard specification for file systems. Mountpoint supports file-based workloads that perform sequential and random reads, sequential (append only) writes, and that don\u2019t need full POSIX semantics.

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#install-mountpoint","title":"Install Mountpoint","text":"

          Access your virtual machine using SSH. Update the packages on your system and install wget to be able to download the mount-s3 binary directly to your VM:

          sudo apt update && sudo apt upgrade\nsudo apt install wget\n

          Now, navigate to your home directory:

          cd\n
          1. Download the Mountpoint for Amazon S3 package using wget command:

            wget https://s3.amazonaws.com/mountpoint-s3-release/latest/x86_64/mount-s3.deb\n
          2. Install the package by entering the following command:

            sudo apt-get install ./mount-s3.deb\n
          3. Verify that Mountpoint for Amazon S3 is successfully installed by entering the following command:

            mount-s3 --version\n

            You should see output similar to the following:

            mount-s3 1.6.0\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#configuring-and-using-mountpoint","title":"Configuring and using Mountpoint","text":"

          Make a folder to store your credentials:

          mkdir ~/.aws/\n

          Create file ~/.aws/credentials using your favorite text editor (for example nano or vim). Add the following contents to it which requires the EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from ec2rc.sh file (during the \"Setup and enable your S3 API credentials\" step):

          [nerc]\naws_access_key_id=<EC2_ACCESS_KEY>\naws_secret_access_key=<EC2_SECRET_KEY>\n

          Save the file and exit the text editor.

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#create-a-local-directory-as-a-mount-point","title":"Create a local directory as a mount point","text":"
          mkdir -p ~/bucket1\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#mount-the-container-locally-using-mountpoint","title":"Mount the Container locally using Mountpoint","text":"

          The object storage container i.e. \"bucket1\" will be mounted in the directory ~/bucket1

          mount-s3 --profile \"nerc\" --endpoint-url \"https://stack.nerc.mghpcc.org:13808\" --allow-other --force-path-style --debug bucket1 ~/bucket1/\n

          In this command,

          • mount-s3 is the Mountpoint for Amazon S3 binary; since it is installed in the /usr/bin/ path, we don't need to specify the full path.

          • --profile corresponds to the name given on the ~/.aws/credentials file i.e. [nerc].

          • --endpoint-url corresponds to the Object Storage endpoint url for NERC Object Storage. You don't need to modify this url.

          • --allow-other: Allows other users to access the mounted filesystem. This is particularly useful when multiple users need to access the mounted S3 bucket. Only allowed if user_allow_other is set in /etc/fuse.conf.

          • --force-path-style: Forces the use of path-style URLs when accessing the S3 bucket. This is necessary when working with certain S3-compatible storage services that do not support virtual-hosted-style URLs.

          • --debug: Enables debug mode, providing additional information about the mounting process.

          • bucket1 is the name of the container which contains the NERC Object Storage resources.

          • ~/bucket1 is the location of the folder in which you want to mount the Object Storage filesystem.

          Important Note

          Mountpoint automatically configures reasonable defaults for file system settings such as permissions and performance. However, if you require finer control over how the Mountpoint file system behaves, you can adjust these settings accordingly. For further details, please refer to this resource.

          In order to test whether the mount was successful, navigate to the directory in which you mounted the NERC container repository, for example:

          cd ~/bucket1\n

          Use the ls command to list its content. You should see the output similar to this:

          ls\n\nREADME.md   image.png   test-file\n

          The NERC Object Storage container repository has now been mounted using Mountpoint.

          Very Important Information

          Please note that none of these mount points are persistent if your VM is stopped or rebooted in the future. After each reboot, you will need to execute the mounting command mentioned above again.

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#automatically-mounting-an-s3-bucket-at-boot","title":"Automatically mounting an S3 bucket at boot","text":"

          Mountpoint does not currently support automatically mounting a bucket at system boot time by configuring it in /etc/fstab. If you would like your bucket(s) to mount automatically when the machine is started, you will need to either set up a Cron Job in crontab or use a service manager like systemd.

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#using-a-cron-job","title":"Using a Cron Job","text":"

          You need to create a Cron job so that the script runs each time your VM reboots, remounting S3 Object Storage to your VM.

          crontab -e\n

          Add this command to the end of the file

          @reboot sh /<Path_To_Directory>/script.sh\n

          For example,

          @reboot sh /home/ubuntu/script.sh\n

          Create a script.sh file and paste the code below into it.

          #!/bin/bash\nmount-s3 [OPTIONS] <BUCKET_NAME> <DIRECTORY>\n

          For example,

          #!/bin/bash\nmount-s3 --profile \"nerc\" --endpoint-url \"https://stack.nerc.mghpcc.org:13808\" --allow-other --force-path-style --debug bucket1 ~/bucket1/\n

          Make the file executable by running the command below:

          chmod +x script.sh\n

          Reboot your VM:

          sudo reboot\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#using-a-service-manager-like-systemd-by-creating-systemd-unit-file","title":"Using a service manager like systemd by creating systemd unit file","text":"

          Create a directory in the /root folder in which you will store the credentials:

          sudo mkdir /root/.aws\n

          Copy the credentials you created in your local directory to the .aws directory in the /root folder:

          sudo cp ~/.aws/credentials /root/.aws/\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#create-systemd-unit-file-ie-mountpoint-s3service","title":"Create systemd unit file i.e. mountpoint-s3.service","text":"

          Create a systemd service unit file that is going to execute the above script and dynamically mount or unmount the container:

          sudo nano /etc/systemd/system/mountpoint-s3.service\n

          Edit the file to look like the below:

          [Unit]\nDescription=Mountpoint for Amazon S3 mount\nDocumentation=https://docs.aws.amazon.com/AmazonS3/latest/userguide/mountpoint.html\n#Wants=network.target\nWants=network-online.target\n#Requires=network-online.target\nAssertPathIsDirectory=/home/ubuntu/bucket1\nAfter=network-online.target\n\n[Service]\nType=forking\nUser=root\nGroup=root\nExecStart=/usr/bin/mount-s3 bucket1 /home/ubuntu/bucket1 \\\n      --profile \"nerc\" \\\n      --endpoint-url \"https://stack.nerc.mghpcc.org:13808\" \\\n      --allow-other \\\n      --force-path-style \\\n      --debug\n\nExecStop=/bin/fusermount -u /home/ubuntu/bucket1\nRestart=always\nRestartSec=10\n\n[Install]\n#WantedBy=remote-fs.target\nWantedBy=default.target\n

          Important Note

          The network-online.target lines ensure that mounting is not attempted until a network connection is available. The service is launched as soon as the network is up and running; it mounts the bucket and remains active.

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#launch-the-service","title":"Launch the service","text":"

          Now reload the systemd daemon:

          sudo systemctl daemon-reload\n

          Start your service

          sudo systemctl start mountpoint-s3.service\n

          To check the status of your service

          sudo systemctl status mountpoint-s3.service\n

          To enable your service on every reboot

          sudo systemctl enable --now mountpoint-s3.service\n

          Information

          The service name is based on the file name i.e. /etc/systemd/system/mountpoint-s3.service so you can just use mountpoint-s3 instead of mountpoint-s3.service on all above systemctl commands.

          To debug you can use:

          sudo systemctl status mountpoint-s3.service -l --no-pager or, journalctl -u mountpoint-s3 --no-pager | tail -50

          Verify, the service is running successfully in background as root user:

          ps aux | grep mount-s3\n\nroot       13585  0.0  0.0 1060504 11672 ?       Sl   02:00   0:00 /usr/bin/mount-s3 bucket1 /home/ubuntu/bucket1 --profile nerc --endpoint-url https://stack.nerc.mghpcc.org:13808 --read-only --allow-other --force-path-style --debug\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#stopping-the-service","title":"Stopping the service","text":"

          Stopping the service causes the container to unmount from the mount point.

          To disable your service on every reboot:

          sudo systemctl disable --now mountpoint-s3.service\n

          Confirm the Service is not in \"Active\" Status:

          sudo systemctl status mountpoint-s3.service\n\n\u25cb mountpoint-s3.service - Mountpoint for Amazon S3 mount\n    Loaded: loaded (/etc/systemd/system/mountpoint-s3.service; disabled; vendor p>\n    Active: inactive (dead)\n

          Unmount the local mount point:

          If the local directory \"bucket1\" is already mounted, unmount it (replace ~/bucket1 with the location at which you have it mounted):

          fusermount -u ~/bucket1\n

          Or,

          sudo umount -l ~/bucket1\n

          Now reboot your VM:

          sudo reboot\n

          Further Reading

          For further details, including instructions for downloading and installing Mountpoint on various Linux operating systems, please refer to this resource.

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#2-using-goofys","title":"2. Using Goofys","text":""},{"location":"openstack/persistent-storage/mount-the-object-storage/#install-goofys","title":"Install goofys","text":"

          Access your virtual machine using SSH. Update the packages on your system and install wget to be able to download the goofys binary directly to your VM:

          sudo apt update && sudo apt upgrade\nsudo apt install wget\n

          Now, navigate to your home directory:

          cd\n

          Use wget to download the goofys binary:

          wget https://github.com/kahing/goofys/releases/latest/download/goofys\n

          Make the goofys binary executable:

          chmod +x goofys\n

          Copy the goofys binary to somewhere in your path

          sudo cp goofys /usr/bin/\n

          To update goofys in the future

          In order to update to a newer version of the goofys binary, you need to do the following:

          • make sure that the data in the NERC Object Storage container is not actively used by any applications on your VM.

          • remove the goofys binary from ubuntu's home directory as well as from /usr/bin/.

          • execute the above commands (those starting with wget and chmod) from your home directory again and copy it to your path i.e. /usr/bin/.

          • reboot your VM.

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#provide-credentials-to-configure-goofys","title":"Provide credentials to configure goofys","text":"

          Make a folder to store your credentials:

          mkdir ~/.aws/\n

          Create file ~/.aws/credentials using your favorite text editor (for example nano or vim). Add the following contents to it which requires the EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from ec2rc.sh file (during the \"Setup and enable your S3 API credentials\" step):

          [nerc]\naws_access_key_id=<EC2_ACCESS_KEY>\naws_secret_access_key=<EC2_SECRET_KEY>\n

          Save the file and exit the text editor.

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#create-a-local-directory-as-a-mount-folder","title":"Create a local directory as a mount folder","text":"
          mkdir -p ~/bucket1\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#mount-the-container-locally-using-goofys","title":"Mount the Container locally using goofys","text":"

          The object storage container i.e. \"bucket1\" will be mounted in the directory ~/bucket1

          goofys -o allow_other --region RegionOne --profile \"nerc\" --endpoint \"https://stack.nerc.mghpcc.org:13808\" bucket1 ~/bucket1\n

          In this command,

          • goofys is the goofys binary; as we already copied it into the /usr/bin/ path, we don't need to specify the full path.

          • -o passes mount-style options to goofys (such as allow_other below) and is handled differently from the -- options.

          • allow_other allows other users to access the mounted filesystem; this is only allowed if user_allow_other is set in /etc/fuse.conf.

          • --profile corresponds to the name given on the ~/.aws/credentials file i.e. [nerc].

          • --endpoint corresponds to the Object Storage endpoint url for NERC Object Storage. You don't need to modify this url.

          • bucket1 is the name of the container which contains the NERC Object Storage resources.

          • ~/bucket1 is the location of the folder in which you want to mount the Object Storage filesystem.

          In order to test whether the mount was successful, navigate to the directory in which you mounted the NERC container repository, for example:

          cd ~/bucket1\n

          Use the ls command to list its content. You should see the output similar to this:

          ls\n\nREADME.md   image.png   test-file\n

          The NERC Object Storage container repository has now been mounted using goofys.

          Very Important Information

          Please note that none of these mount points are persistent if your VM is stopped or rebooted in the future. After each reboot, you will need to execute the mounting command mentioned above again.

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#mounting-on-system-startup","title":"Mounting on system startup","text":"

          Mounts can be set to occur automatically during system initialization so that mounted file systems persist even after a VM reboot.

          Create a directory in the /root folder in which you will store the credentials:

          sudo mkdir /root/.aws\n

          Copy the credentials you created in your local directory to the .aws directory in the /root folder:

          sudo cp ~/.aws/credentials /root/.aws/\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#configure-mounting-of-the-bucket1-container","title":"Configure mounting of the bucket1 container","text":"

          Open the file /etc/fstab using your favorite command line text editor for editing. You will need sudo privileges for that. For example, if you want to use nano, execute this command:

          sudo nano /etc/fstab\n

          Proceed with one of the methods below depending on whether you wish to have the \"bucket1\" repository automatically mounted at system startup:

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#method-1-mount-the-repository-automatically-on-system-startup","title":"Method 1: Mount the repository automatically on system startup","text":"

          Add the following line to the /etc/fstab file:

          /usr/bin/goofys#bucket1 /home/ubuntu/bucket1 fuse _netdev,allow_other,--dir-mode=0777,--file-mode=0666,--region=RegionOne,--profile=nerc,--endpoint=https://stack.nerc.mghpcc.org:13808 0 0\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#method-2-do-not-mount-the-repository-automatically-on-system-startup","title":"Method 2: Do NOT mount the repository automatically on system startup","text":"

          Add the following line to the /etc/fstab file:

          /usr/bin/goofys#bucket1 /home/ubuntu/bucket1 fuse noauto,_netdev,allow_other,--dir-mode=0777,--file-mode=0666,--region=RegionOne,--profile=nerc,--endpoint=https://stack.nerc.mghpcc.org:13808 0 0\n

          The difference between this code and the code mentioned in Method 1 is the addition of the option noauto.

          Content of /etc/fstab

          In the /etc/fstab content as added above:

          grep goofys /etc/fstab\n\n/usr/bin/goofys#bucket1 /home/ubuntu/bucket1 fuse _netdev,allow_other,--dir-mode=0777,--file-mode=0666,--region=RegionOne,--profile=nerc,--endpoint=https://stack.nerc.mghpcc.org:13808 0 0\n
          • /usr/bin/goofys is the location of your goofys binary.

          • /home/ubuntu/bucket1 is the location in which you wish to mount bucket1 container from your NERC Object Storage.

          • --profile=nerc is the profile name you specified in the ~/.aws/credentials file, i.e. [nerc].

          Once you have added that line to your /etc/fstab file, reboot the VM. After the system has restarted, check whether the NERC Object Storage repository i.e. bucket1 is mounted in the directory specified by you i.e. in /home/ubuntu/bucket1.

          Important Information

          If you just want to test the mount entry written in /etc/fstab without rebooting the VM, you can do so by running sudo mount -a. If you want to stop automatically mounting the container from the NERC Object Storage repository, i.e. bucket1, remove the line you added to the /etc/fstab file, or comment it out by adding a # character in front of it, and then reboot the VM. Optionally, you can also remove the goofys binary and the credentials file located at ~/.aws/credentials if you no longer want to use goofys.

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#3-using-s3fs","title":"3. Using S3FS","text":""},{"location":"openstack/persistent-storage/mount-the-object-storage/#install-s3fs","title":"Install S3FS","text":"

          Access your virtual machine using SSH. Update the packages on your system and install s3fs:

          sudo apt update && sudo apt upgrade\nsudo apt install s3fs\n

          For RedHat/Rocky/AlmaLinux

          The RedHat/Rocky/AlmaLinux repositories do not have s3fs. Therefore, you will need to compile it yourself.

          First, using your local computer, visit the following website (it contains the releases of s3fs): https://github.com/s3fs-fuse/s3fs-fuse/releases/latest.

          Then, in the section for the most recent release, find the Assets part. From there, find the link to the zip version of the Source code.

          Right-click the Source code zip link, i.e. \"v1.94.zip\", and select \"Copy link address\". You will need this link later as a parameter for the wget command to download the archive to your virtual machine.

          Access your VM on the NERC OpenStack using the web console or SSH.

          Update your packages:

          sudo dnf update -y\n

          Install the prerequisites including fuse, the C++ compiler and make:

          sudo dnf config-manager --set-enabled crb\n\nsudo dnf install automake fuse fuse-devel gcc-c++ git libcurl-devel libxml2-devel make openssl-devel wget unzip\n\n# OR, sudo dnf --enablerepo=crb install automake fuse fuse-devel gcc-c++ git libcurl-devel libxml2-devel make openssl-devel wget unzip\n

          Now, use wget to download the source code. Replace https://github.com/s3fs-fuse/s3fs-fuse/archive/refs/tags/v1.94.zip with the link to the source code you found previously:

          wget https://github.com/s3fs-fuse/s3fs-fuse/archive/refs/tags/v1.94.zip\n

          Use the ls command to verify that the zip archive has been downloaded:

          ls\n

          Unzip the archive (replace v1.94.zip with the name of the archive you downloaded):

          unzip v1.94.zip\n

          Use the ls command to find the name of the folder you just extracted:

          ls\n

          Now, navigate to that folder (replace s3fs-fuse-1.94 with the name of the folder you just extracted):

          cd s3fs-fuse-1.94\n

          Perform the compilation by executing the following commands in order:

          ./autogen.sh\n./configure\nmake\nsudo make install\n

          s3fs should now be installed in /usr/local/bin/s3fs.
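
          You can verify the installation, for example, by checking the version:

          s3fs --version\n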

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#create-a-file-which-will-store-the-s3-credentials","title":"Create a file which will store the S3 Credentials","text":"

          Store your S3 credentials in a file ${HOME}/.passwd-s3fs and set \"owner-only\" permissions. Run the following command, replacing EC2_ACCESS_KEY and EC2_SECRET_KEY with the keys that you noted from the ec2rc.sh file (above), to store them in the file.

          echo EC2_ACCESS_KEY:EC2_SECRET_KEY > ${HOME}/.passwd-s3fs\n

          Change the permissions of this file to 600 to set \"owner-only\" permissions:

          chmod 600 ${HOME}/.passwd-s3fs\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#create-a-container-in-the-nerc-projects-object-storage","title":"Create a Container in the NERC Project's Object storage","text":"

          We will create it using the OpenStack Swift client. First, install the client:

          sudo apt install python3-swiftclient\n

          Let's call the Container \"bucket1\"

          swift post bucket1\n

          More about Swift Interface

          You can read more about using Swift Interface for NERC Object Storage here.

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#create-a-local-directory-as-a-mount-point-in-your-vm","title":"Create a local directory as a mount point in your VM","text":"
          mkdir -p ~/bucket1\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#mount-the-container-locally-using-s3fs","title":"Mount the Container locally using s3fs","text":"

          The object storage container, i.e. \"bucket1\", will be mounted in the directory ~/bucket1:

          s3fs bucket1 ~/bucket1 -o passwd_file=~/.passwd-s3fs -o url=https://stack.nerc.mghpcc.org:13808 -o use_path_request_style -o umask=0002\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#unmount-the-local-mount-point","title":"Unmount the local mount point","text":"

          If the local directory \"bucket1\" is already mounted, unmount it (replace ~/bucket1 with the location where you have it mounted):

          sudo umount -l ~/bucket1\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#configure-mounting-of-the-bucket1-repository","title":"Configure mounting of the bucket1 repository","text":"

          Open the file /etc/fstab using your favorite command line text editor for editing. You will need sudo privileges for that. For example, if you want to use nano, execute this command:

          sudo nano /etc/fstab\n

          Proceed with one of the methods below depending on whether you wish to have the \"bucket1\" repository automatically mounted at system startup:

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#method-1-mount-the-repository-automatically-on-startup","title":"Method 1: Mount the repository automatically on startup","text":"

          Add the following line to the /etc/fstab file:

          /usr/bin/s3fs#bucket1 /home/ubuntu/bucket1 fuse passwd_file=/home/ubuntu/.passwd-s3fs,_netdev,allow_other,use_path_request_style,uid=0,umask=0222,mp_umask=0222,gid=0,url=https://stack.nerc.mghpcc.org:13808 0 0\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#method-2-do-not-mount-the-repository-automatically-on-startup","title":"Method 2: Do NOT mount the repository automatically on startup","text":"

          Add the following line to the /etc/fstab file:

          /usr/bin/s3fs#bucket1 /home/ubuntu/bucket1 fuse noauto,passwd_file=/home/ubuntu/.passwd-s3fs,_netdev,allow_other,use_path_request_style,uid=0,umask=0222,mp_umask=0222,gid=0,url=https://stack.nerc.mghpcc.org:13808 0 0\n

          The difference between this code and the code mentioned in Method 1 is the addition of the option noauto.

          Content of /etc/fstab

          In the /etc/fstab line added above:

          • /usr/bin/s3fs is the location of your s3fs binary. If you installed it using apt on Debian or Ubuntu, you do not have to change anything here. If you are using a self-compiled version of s3fs created on RedHat/Rocky/AlmaLinux as explained above, that location is /usr/local/bin/s3fs.

          • /home/ubuntu/.passwd-s3fs is the location of the file that contains the key pair used for mounting the \"bucket1\" repository, as created in the previous step.
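
          As in the goofys section above, you can test the /etc/fstab entry without rebooting by running, for example:

          sudo mount -a\n\nmount | grep bucket1\n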

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#4-using-rclone","title":"4. Using Rclone","text":""},{"location":"openstack/persistent-storage/mount-the-object-storage/#installing-rclone","title":"Installing Rclone","text":"

          Install rclone as described here, or, for our Ubuntu-based VM, simply SSH into the VM and run the following command as the default ubuntu user:

          curl -sSL https://rclone.org/install.sh | sudo bash\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#configuring-rclone","title":"Configuring Rclone","text":"

          If you run rclone config file, it will show you the default location of the configuration file:

          rclone config file\nConfiguration file doesn't exist, but rclone will use this path:\n/home/ubuntu/.config/rclone/rclone.conf\n

          Create the config file at the path mentioned above, i.e. /home/ubuntu/.config/rclone/rclone.conf, and add the following entry with the name [nerc]:

          [nerc]\ntype = s3\nenv_auth = false\nprovider = Other\nendpoint = https://stack.nerc.mghpcc.org:13808\nacl = public-read\naccess_key_id = <YOUR_EC2_ACCESS_KEY_FROM_ec2rc_FILE>\nsecret_access_key = <YOUR_EC2_SECRET_KEY_FROM_ec2rc_FILE>\nlocation_constraint =\nserver_side_encryption =\n

          More about the config for AWS S3 compatible API can be seen here.

          Important Information

          Note that if you set env_auth = true, rclone will take the credentials from environment variables, so you should not insert the access and secret keys in that case.
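
          For illustration only, if you prefer env_auth = true, a sketch of the alternative setup would be to export rclone's standard AWS-style environment variables and omit the keys from the [nerc] entry (the variable names here follow rclone's S3 backend conventions):

          export AWS_ACCESS_KEY_ID=<YOUR_EC2_ACCESS_KEY_FROM_ec2rc_FILE>\nexport AWS_SECRET_ACCESS_KEY=<YOUR_EC2_SECRET_KEY_FROM_ec2rc_FILE>\n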

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#listing-the-containers-and-contents-of-a-container","title":"Listing the Containers and Contents of a Container","text":"

          Once your Object Storage has been configured in Rclone, you can use the Rclone interface to list all of the Containers with the \"lsd\" command:

          rclone lsd \"nerc:\"\n

          Or,

          rclone lsd \"nerc:\" --config=rclone.conf\n

          For example,

          rclone lsd \"nerc:\" --config=rclone.conf\n      -1 2024-04-23 20:21:43        -1 bucket1\n

          To list the files and folders available within a container, i.e. \"bucket1\" in this case, we can use the \"ls\" command:

          rclone ls \"nerc:bucket1/\"\n  653 README.md\n    0 image.png\n   12 test-file\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#create-a-mount-point-directory","title":"Create a mount point directory","text":"
          mkdir -p bucket1\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#mount-the-container-with-rclone","title":"Mount the container with Rclone","text":"

          Start the mount like this, where /home/ubuntu/bucket1 is an empty existing directory:

          rclone -vv --vfs-cache-mode full mount nerc:bucket1 /home/ubuntu/bucket1 --allow-other --allow-non-empty\n

          On Linux, you can run mount in either foreground or background (aka daemon) mode. Mount runs in foreground mode by default. Use the --daemon flag to force background mode i.e.

          rclone mount remote:path/to/files /path/to/local/mount --daemon\n

          When running in background mode the user will have to stop the mount manually:

          fusermount -u /path/to/local/mount\n

          Or,

          sudo umount -l /path/to/local/mount\n

          Now we have the mount running with background mode enabled. If you want the mount to persist after a server/machine reboot, there are a few ways to do it:

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#create-systemd-unit-file-ie-rclone-mountservice","title":"Create systemd unit file i.e. rclone-mount.service","text":"

          Create a systemd service unit file that runs the rclone mount command above and dynamically mounts or unmounts the container:

          sudo nano /etc/systemd/system/rclone-mount.service\n

          Edit the file to look like the below:

          [Unit]\nDescription=rclone mount\nDocumentation=http://rclone.org/docs/\nAssertPathIsDirectory=/home/ubuntu/bucket1\nAfter=network-online.target\n\n[Service]\nType=simple\nUser=root\nGroup=root\nExecStart=/usr/bin/rclone mount \\\n      --config=/home/ubuntu/.config/rclone/rclone.conf \\\n      --vfs-cache-mode full \\\n      nerc:bucket1 /home/ubuntu/bucket1 \\\n              --allow-other \\\n              --allow-non-empty\n\nExecStop=/bin/fusermount -u /home/ubuntu/bucket1\nRestart=always\nRestartSec=10\n\n[Install]\nWantedBy=default.target\n

          The service is launched as soon as the network is up and running; it mounts the bucket and remains active. Stopping the service causes the container to be unmounted from the mount point.

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#launch-the-service-using-a-service-manager","title":"Launch the service using a service manager","text":"

          Now reload the systemd daemon:

          sudo systemctl daemon-reload\n

          Start your service

          sudo systemctl start rclone-mount.service\n

          To check the status of your service

          sudo systemctl status rclone-mount.service\n

          To enable your service on every reboot

          sudo systemctl enable --now rclone-mount.service\n

          Information

          The service name is based on the file name, i.e. /etc/systemd/system/rclone-mount.service, so you can just use rclone-mount instead of rclone-mount.service in all of the above systemctl commands.

          To debug you can use:

          sudo systemctl status rclone-mount.service -l --no-pager or, journalctl -u rclone-mount --no-pager | tail -50

          Verify, if the container is mounted successfully:

          df -hT | grep rclone\nnerc:bucket1   fuse.rclone  1.0P     0  1.0P   0% /home/ubuntu/bucket1\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#5-using-juicefs","title":"5. Using JuiceFS","text":""},{"location":"openstack/persistent-storage/mount-the-object-storage/#preparation","title":"Preparation","text":"

          A JuiceFS file system consists of two parts:

          • Object Storage: Used for data storage.

          • Metadata Engine: A database used for storing metadata. In this case, we will use a durable Redis in-memory database service that provides extremely fast performance.

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#installation-of-the-juicefs-client","title":"Installation of the JuiceFS client","text":"

          Access your virtual machine using SSH. Update the packages on your system and install the JuiceFS client:

          sudo apt update && sudo apt upgrade\n# default installation path is /usr/local/bin\ncurl -sSL https://d.juicefs.com/install | sh -\n

          Verify whether the JuiceFS client is running in the background:

          ps aux | grep juicefs\nubuntu     16275  0.0  0.0   7008  2212 pts/0    S+   18:44   0:00 grep --color=auto juicefs\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#installing-and-configuring-redis-database","title":"Installing and Configuring Redis database","text":"

          Install Redis by running:

          sudo apt install redis-server\n

          This will download and install Redis and its dependencies. Following this, there is one important configuration change to make in the Redis configuration file, which was generated automatically during the installation.

          You can check the line numbers where the supervised directive appears by running:

          sudo cat /etc/redis/redis.conf -n | grep supervised\n\n228  #   supervised no      - no supervision interaction\n229  #   supervised upstart - signal upstart by putting Redis into SIGSTOP mode\n231  #   supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET\n232  #   supervised auto    - detect upstart or systemd method based on\n236  supervised no\n

          Open this file with your preferred text editor:

          sudo nano /etc/redis/redis.conf -l\n

          Inside the config file, find the supervised directive. This directive allows you to declare an init system to manage Redis as a service, providing you with more control over its operation. The supervised directive is set to no by default. Since you are running Ubuntu, which uses the systemd init system, change this to systemd as shown here:
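
          That is, after the edit the line should read:

          supervised systemd\n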

          • Binding to localhost:

          By default, Redis is only accessible from localhost. We can verify this by locating the bind line:

            sudo cat /etc/redis/redis.conf -n | grep bind\n\n  ...\n  68  bind 127.0.0.1 ::1\n  ...\n

          and make sure it is uncommented (remove the # if it exists) by editing this file with your preferred text editor.

          Save and close the file when you are finished. If you used nano to edit it, do so by pressing CTRL + X, Y, then ENTER.

          Then, restart the Redis service to reflect the changes you made to the configuration file:

            sudo systemctl restart redis.service\n

          With that, you've installed and configured Redis and it's running on your machine. Before you begin using it, you should first check whether Redis is functioning correctly.

          Start by checking that the Redis service is running:

            sudo systemctl status redis\n

          If it is running without any errors, this command will show \"active (running)\" Status.

          To test that Redis is functioning correctly, connect to the server using redis-cli, Redis's command-line client:

            redis-cli\n

          In the prompt that follows, test connectivity with the ping command:

            ping\n

          Output:

            PONG\n

          Also, check that binding to localhost is working fine by running the following netstat command:

            sudo netstat -lnp | grep redis\n\n  tcp        0      0 127.0.0.1:6379          0.0.0.0:*               LISTEN      16967/redis-server\n  tcp6       0      0 ::1:6379                :::*                    LISTEN      16967/redis-server\n

          Important Note

          The netstat command may not be available on your system by default. If this is the case, you can install it (along with a number of other handy networking tools) with the following command: sudo apt install net-tools.

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#configuring-a-redis-password","title":"Configuring a Redis Password","text":"

          Configuring a Redis password enables one of its two built-in security features: the auth command, which requires clients to authenticate before accessing the database. The password is configured directly in Redis's configuration file, /etc/redis/redis.conf.

          First, we need to locate the line where the requirepass directive is mentioned:

          sudo cat /etc/redis/redis.conf -n | grep requirepass\n\n...\n790  # requirepass foobared\n...\n

          Then open Redis's config file, i.e. /etc/redis/redis.conf, again with your preferred editor:

          sudo nano /etc/redis/redis.conf -l\n

          Uncomment it by removing the #, and change foobared to a secure password.
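
          That is, the line should look similar to the following, with your own generated password in place of the placeholder:

          requirepass <your_redis_password>\n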

          How to generate a random password?

          You can use openssl to generate a random password by running the following command locally:

          openssl rand 12 | openssl base64 -A

          <your_redis_password>

          After saving and closing the file, you need to restart the Redis service to reflect the changes you made to the configuration file by running:

          sudo systemctl restart redis.service\n

          To test that the password works, open up the Redis client:

          redis-cli\n

          The following shows a sequence of commands used to test whether the Redis password works. The first command tries to set a key to a value before authentication:

          127.0.0.1:6379> set key1 10\n

          That won't work because you didn't authenticate, so Redis returns an error:

          Output:

          (error) NOAUTH Authentication required.\n

          The next command authenticates with the password specified in the Redis configuration file:

          127.0.0.1:6379> auth <your_redis_password>\n

          Redis acknowledges:

          Output:

          OK\n

          After that, running the previous command again will succeed:

          127.0.0.1:6379> set key1 10\n

          Output:

          OK\n

          get key1 queries Redis for the value of the new key.

          127.0.0.1:6379> get key1\n

          Output:

          \"10\"\n

          After confirming that you're able to run commands in the Redis client after authenticating, you can exit redis-cli:

          127.0.0.1:6379> quit\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#setting-authorizing-s3-access-using-juicefs-config","title":"Setting authorizing S3 access using juicefs config","text":"

          You can store the S3 credentials using juicefs config, which allows you to add the Access Key and Secret Key for the file system, by running:

          juicefs config \\\n--access-key=<EC2_ACCESS_KEY> \\\n--secret-key=<EC2_SECRET_KEY> \\\nredis://default:<your_redis_password>@127.0.0.1:6379/1\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#formatting-file-system","title":"Formatting file system","text":"
          sudo juicefs format --storage s3 --bucket https://stack.nerc.mghpcc.org:13808/<your_container> redis://default:<your_redis_password>@127.0.0.1:6379/1 myjfs\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#mounting-file-system-manually","title":"Mounting file system manually","text":""},{"location":"openstack/persistent-storage/mount-the-object-storage/#create-a-local-directory-as-a-mount-point-folder","title":"Create a local directory as a mount point folder","text":"
          mkdir -p ~/bucket1\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#mount-the-container-locally-using-juicefs","title":"Mount the Container locally using juicefs","text":"

          The formatted file system \"myjfs\" will be mounted in the directory ~/bucket1 by running the following command:

          juicefs mount redis://default:<your_redis_password>@127.0.0.1:6379/1 ~/bucket1\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#mount-juicefs-at-boot-time","title":"Mount JuiceFS at Boot Time","text":"

          After JuiceFS has been successfully formatted, follow this guide to set up auto-mount on boot.

          We can specify the --update-fstab option on the mount command, which will automatically set up mounting at boot:

          sudo juicefs mount --update-fstab --max-uploads=50 --writeback --cache-size 204800 <META-URL> <MOUNTPOINT>\n\ngrep <MOUNTPOINT> /etc/fstab\n<META-URL> <MOUNTPOINT> juicefs _netdev,max-uploads=50,writeback,cache-size=204800 0 0\n\nls -l /sbin/mount.juicefs\nlrwxrwxrwx 1 root root 22 Apr 24 20:25 /sbin/mount.juicefs -> /usr/local/bin/juicefs\n

          For example,

          sudo juicefs mount --update-fstab --max-uploads=50 --writeback --cache-size 204800 redis://default:<your_redis_password>@127.0.0.1:6379/1 ~/bucket1\n\ngrep juicefs /etc/fstab\nredis://default:<your_redis_password>@127.0.0.1:6379/1  /home/ubuntu/bucket1  juicefs  _netdev,cache-size=204800,max-uploads=50,writeback  0 0\n\nls -l /sbin/mount.juicefs\nlrwxrwxrwx 1 root root 22 Apr 24 20:25 /sbin/mount.juicefs -> /usr/local/bin/juicefs\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#automating-mounting-with-systemd-service-unit-file","title":"Automating Mounting with systemd service unit file","text":"

          If you're using JuiceFS and need to apply settings like the database access password, S3 access key, and secret key, which are hidden from the command line using environment variables for security reasons, it may not be easy to configure them in the /etc/fstab file. In such cases, you can use systemd to mount your JuiceFS instance.

          Here's how you can set up your systemd configuration file:

          Create a systemd service unit file that runs the juicefs mount command and dynamically mounts or unmounts the container:

          sudo nano /etc/systemd/system/juicefs-mount.service\n

          Edit the file to look like the below:

          [Unit]\nDescription=JuiceFS mount\nDocumentation=https://juicefs.com/docs/\nAssertPathIsDirectory=/home/ubuntu/bucket1\nAfter=network-online.target\n\n[Service]\nType=simple\nUser=root\nGroup=root\nExecStart=/usr/local/bin/juicefs mount \\\n\"redis://default:<your_redis_password>@127.0.0.1:6379/1\" \\\n/home/ubuntu/bucket1 \\\n--no-usage-report \\\n--writeback \\\n--cache-size 102400 \\\n--cache-dir /home/juicefs_cache \\\n--buffer-size 2048 \\\n--open-cache 0 \\\n--attr-cache 1 \\\n--entry-cache 1 \\\n--dir-entry-cache 1 \\\n--cache-partial-only false \\\n--free-space-ratio 0.1 \\\n--max-uploads 20 \\\n--max-deletes 10 \\\n--backup-meta 0 \\\n--log /var/log/juicefs.log \\\n--get-timeout 300 \\\n--put-timeout 900 \\\n--io-retries 90 \\\n--prefetch 1\n\nExecStop=/usr/local/bin/juicefs umount /home/ubuntu/bucket1\nRestart=always\nRestartSec=10\n\n[Install]\nWantedBy=default.target\n

          Important Information

          Feel free to modify the options and environments according to your needs. Please make sure you change <your_redis_password> to your own Redis password that was set up by following this step.

          The service is launched as soon as the network is up and running; it mounts the bucket and remains active. Stopping the service causes the container to be unmounted from the mount point.

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#launch-the-service-as-daemon","title":"Launch the service as daemon","text":"

          Now reload the systemd daemon:

          sudo systemctl daemon-reload\n

          Start your service

          sudo systemctl start juicefs-mount.service\n

          To check the status of your service

          sudo systemctl status juicefs-mount.service\n

          To enable your service on every reboot

          sudo systemctl enable --now juicefs-mount.service\n

          Information

          The service name is based on the file name, i.e. /etc/systemd/system/juicefs-mount.service, so you can just use juicefs-mount instead of juicefs-mount.service in all of the above systemctl commands.

          To debug you can use:

          sudo systemctl status juicefs-mount.service -l --no-pager or, journalctl -u juicefs-mount --no-pager | tail -50

          Verify, if the container is mounted successfully:

          df -hT | grep juicefs\nJuiceFS:myjfs  fuse.juicefs  1.0P  4.0K  1.0P   1% /home/ubuntu/bucket1\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#data-synchronization","title":"Data Synchronization","text":"

          juicefs sync is a powerful data migration tool which can copy data across all supported storage types, including object storage, JuiceFS itself, and local file systems; you can freely copy data between any of these systems.

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#command-syntax","title":"Command Syntax","text":"

          It synchronizes data from SRC, i.e. the source data address or path, to DST, i.e. the destination address or path; both directories and files are supported.

          juicefs sync [command options] SRC DST\n

          More Information

          [command options] are synchronization options. See command reference for more details.

          Address format:

          [NAME://][ACCESS_KEY:SECRET_KEY[:TOKEN]@]BUCKET[.ENDPOINT][/PREFIX]\n\n# MinIO only supports path style\nminio://[ACCESS_KEY:SECRET_KEY[:TOKEN]@]ENDPOINT/BUCKET[/PREFIX]\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#synchronize-between-object-storage-and-juicefs","title":"Synchronize between Object Storage and JuiceFS","text":"

          The following commands synchronize the movies container on Object Storage to your local JuiceFS file system, i.e. ~/jfs:

          # create local folder\nmkdir -p ~/jfs\n# mount JuiceFS\njuicefs mount -d redis://default:<your_redis_password>@127.0.0.1:6379/1 ~/jfs\n# synchronize\njuicefs sync --force-update s3://<EC2_ACCESS_KEY>:<EC2_SECRET_KEY>@movies.stack.nerc.mghpcc.org:13808/ ~/jfs/\n

          The following commands synchronize the images directory from your local JuiceFS file system, i.e. ~/jfs, to the Object Storage Container, i.e. the movies container:

          # mount JuiceFS\njuicefs mount -d redis://default:<your_redis_password>@127.0.0.1:6379/1 ~/jfs\n# create local folder and add some file to this folder\nmkdir -p ~/jfs/images/\ncp \"test.image\" ~/jfs/images/\n# synchronization\njuicefs sync --force-update ~/jfs/images/ s3://<EC2_ACCESS_KEY>:<EC2_SECRET_KEY>@movies.stack.nerc.mghpcc.org:13808/images/\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#how-to-destroy-a-file-system","title":"How to destroy a file system","text":"

          After JuiceFS has been successfully formatted, follow this guide to clean up.

          JuiceFS client provides the destroy command to completely destroy a file system, which will result in:

          • Deletion of all metadata entries of this file system

          • Deletion of all data blocks of this file system

          Use this command in the following format:

          juicefs destroy <METADATA URL> <UUID>\n

          Here,

          <METADATA URL>: The URL address of the metadata engine

          <UUID>: The UUID of the file system

          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#find-the-uuid-of-existing-mount-file-system","title":"Find the UUID of existing mount file system","text":"

          You can run either juicefs config redis://default:<your_redis_password>@127.0.0.1:6379/1 or juicefs status redis://default:<your_redis_password>@127.0.0.1:6379/1 to get detailed information about the mounted file system, i.e. \"myjfs\", that was set up by following this step. The output looks like this:

          {\n...\n\"Name\": \"myjfs\",\n\"UUID\": \"<UUID>\",\n...\n}\n
          "},{"location":"openstack/persistent-storage/mount-the-object-storage/#destroy-a-file-system","title":"Destroy a file system","text":"

          Please note the \"UUID\", which you will need in order to run the juicefs destroy command as shown below:

          juicefs destroy redis://default:<your_redis_password>@127.0.0.1:6379/1 <UUID> --force\n

          When destroying a file system, the client will issue a confirmation prompt. Please make sure to check the file system information carefully and enter y after confirming it is correct.

          Danger

          The destroy operation will cause all the data in the database and the object storage associated with the file system to be deleted. Please make sure to back up the important data before operating!

          "},{"location":"openstack/persistent-storage/object-storage/","title":"Object Storage","text":"

          OpenStack Object Storage (Swift) is a highly available, distributed, eventually consistent object/blob store. Object Storage is used to manage cost-effective and long-term preservation and storage of large amounts of data across clusters of standard server hardware. The common use cases include the storage, backup and archiving of unstructured data, such as documents, static web content, images, video files, and virtual machine images, etc.

          End-users can interact with the object storage system through a RESTful HTTP API, i.e. the Swift API, or use one of the many client libraries that exist for all of the popular programming languages, such as Java, Python, Ruby, and C#, based on provisioned quotas. Swift is also compatible with Amazon's Simple Storage Service (S3) API, which makes it easier for end-users to move data between multiple storage endpoints and supports hybrid cloud setups.

          "},{"location":"openstack/persistent-storage/object-storage/#1-access-by-web-interface-ie-horizon-dashboard","title":"1. Access by Web Interface i.e. Horizon Dashboard","text":"

          To get started, navigate to Project -> Object Store -> Containers.

          "},{"location":"openstack/persistent-storage/object-storage/#create-a-container","title":"Create a Container","text":"

          In order to store objects, you need at least one Container to put them in. Containers are essentially top-level directories. Other services use the terminology buckets.

          Click Create Container. Give your container a name.

          Important Note

          The container name needs to be unique, not just within your project but across all of our OpenStack installation. If you get an error message after trying to create the container, try giving it a more unique name.

          For now, leave the \"Container Access\" set to Private.

          "},{"location":"openstack/persistent-storage/object-storage/#upload-a-file","title":"Upload a File","text":"

          Click on the name of your container, and click the Upload File icon as shown below:

          Click Browse and select a file from your local machine to upload.

          It can take a while to upload very large files, so if you're just testing it out you may want to use a small text file or similar.

          By default the File Name will be the same as the original file, but you can change it to another name. Click \"Upload File\". Your file will appear inside the container as shown below once successful:

          "},{"location":"openstack/persistent-storage/object-storage/#using-folders","title":"Using Folders","text":"

          Object storage does not, by definition, organize objects into folders, but you can use folders to keep your data organized.

          On the backend, the folder name is actually just prefixed to the object name, but from the web interface (and most other clients) it works just like a folder.
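
          For example, an object uploaded into a folder named images is stored under a single object name that includes that prefix; a hypothetical listing with the Swift client (covered later in this page) would show something like:

          swift list unique-container-test\nimages/image.png\n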

          To add a folder, click on the \"+ folder\" icon as shown below:

          "},{"location":"openstack/persistent-storage/object-storage/#make-a-container-public","title":"Make a container public","text":"

          Making a container public allows you to send your collaborators a URL that gives access to the container's contents.

          Hosting a static website using public Container

          You can use public Container to host a static website. On a static website, individual webpages include static website content (HTML, CSS etc.). They might also contain client-side scripts (e.g. JavaScript).

          Click on your container's name, then check the \"Public Access\" checkbox. Note that \"Public Access\" changes from \"Disabled\" to \"Link\".

          Click \"Link\" to see a list of object in the container. This is the URL of your container.

          Important Note

          Anyone who obtains the URL will be able to access the container, so this is not recommended as a way to share sensitive data with collaborators.

          In addition, everything inside a public container is public, so we recommend creating a separate container specifically for files that should be made public.

          To download the file test-file we would use the following url.

          Very Important Information

          Here, 4c5bccef73c144679d44cbc96b42df4e is the specific Tenant ID or Project ID. You can get this value when you click on the public container's Link in a new browser tab.

          Or, you can just click on \"Download\" next to the file's name as shown below:

          You can also interact with public objects using a utility such as curl:

          curl https://stack.nerc.mghpcc.org:13808/v1/AUTH_4c5bccef73c144679d44cbc96b42df4e/unique-container-test\ntest-file\n

          To download a file:

          curl -o local-file.txt https://stack.nerc.mghpcc.org:13808/v1/AUTH_4c5bccef73c144679d44cbc96b42df4e/unique-container-test/test-file\n
          "},{"location":"openstack/persistent-storage/object-storage/#make-a-container-private","title":"Make a container private","text":"

          You can make a public container private by clicking on your container's name, then uncheck the \"Public Access\" checkbox. Note that \"Public Access\" changes from \"Link\" to \"Disabled\".

          This will deactivate the public URL of the container and then it will show \"Disabled\".

          "},{"location":"openstack/persistent-storage/object-storage/#2-access-by-using-apis","title":"2. Access by using APIs","text":""},{"location":"openstack/persistent-storage/object-storage/#i-openstack-cli","title":"i. OpenStack CLI","text":"

          Prerequisites:

          To run the OpenStack CLI commands, you need to have:

          • OpenStack CLI setup, see OpenStack Command Line setup for more information.
          "},{"location":"openstack/persistent-storage/object-storage/#some-object-storage-management-examples","title":"Some Object Storage management examples","text":""},{"location":"openstack/persistent-storage/object-storage/#create-a-container_1","title":"Create a container","text":"

          In order to create a container in the Object Storage service, you can use the openstack client with the following command.

          openstack container create mycontainer\n+---------------------------------------+-------------+------------------------------------+\n| account                               | container   | x-trans-id                         |\n+---------------------------------------+-------------+------------------------------------+\n| AUTH_4c5bccef73c144679d44cbc96b42df4e | mycontainer | txb875f426a011476785171-00624b37e8 |\n+---------------------------------------+-------------+------------------------------------+\n

          Once created you can start adding objects.

          "},{"location":"openstack/persistent-storage/object-storage/#manipulate-objects-in-a-container","title":"Manipulate objects in a container","text":"

          To upload files to a container you can use the following command

          openstack object create --name my_test_file mycontainer test_file.txt\n+--------------+-------------+----------------------------------+\n| object       | container   | etag                             |\n+--------------+-------------+----------------------------------+\n| my_test_file | mycontainer | e3024896943ee80422d1e5ff44423658 |\n+--------------+-------------+----------------------------------+\n

          Once uploaded you can see the metadata through:

          openstack object show mycontainer my_test_file\n+----------------+---------------------------------------+\n| Field          | Value                                 |\n+----------------+---------------------------------------+\n| account        | AUTH_4c5bccef73c144679d44cbc96b42df4e |\n| container      | mycontainer                           |\n| content-length | 26                                    |\n| content-type   | application/octet-stream              |\n| etag           | e3024896943ee80422d1e5ff44423658      |\n| last-modified  | Mon, 04 Apr 2022 18:27:14 GMT         |\n| object         | my_test_file                          |\n+----------------+---------------------------------------+\n

          You can save the contents of the object from your container to your local machine by using:

          openstack object save mycontainer my_test_file --file test_file.txt

          Very Important

          Please note that this will overwrite the file in the local directory.

          Finally you can delete the object with the following command

          openstack object delete mycontainer my_test_file

          "},{"location":"openstack/persistent-storage/object-storage/#delete-the-container","title":"Delete the container","text":"

          If you want to delete the container, you can use the following command

          openstack container delete mycontainer

          If the container contains data, a plain delete will fail with a conflict error, and you will need to use the recursive option to delete the objects as well:

          openstack container delete mycontainer\nConflict (HTTP 409) (Request-ID: tx6b53c2b3e52d453e973b4-00624b400f)\n

          So, try deleting the container recursively using the following command:

          openstack container delete --recursive mycontainer

          "},{"location":"openstack/persistent-storage/object-storage/#list-existing-containers","title":"List existing containers","text":"

          You can check the existing containers with

          openstack container list\n+---------------+\n| Name          |\n+---------------+\n| mycontainer   |\n+---------------+\n
          "},{"location":"openstack/persistent-storage/object-storage/#swift-quota-utilization","title":"Swift quota utilization","text":"

          To check the overall space used, you can use the following command

          openstack object store account show\n+------------+---------------------------------------+\n| Field      | Value                                 |\n+------------+---------------------------------------+\n| Account    | AUTH_4c5bccef73c144679d44cbc96b42df4e |\n| Bytes      | 665                                   |\n| Containers | 1                                     |\n| Objects    | 3                                     |\n+------------+---------------------------------------+\n

          To check the space used by a specific container

          openstack container show mycontainer\n+----------------+---------------------------------------+\n| Field          | Value                                 |\n+----------------+---------------------------------------+\n| account        | AUTH_4c5bccef73c144679d44cbc96b42df4e |\n| bytes_used     | 665                                   |\n| container      | mycontainer                           |\n| object_count   | 3                                     |\n| read_acl       | .r:*,.rlistings                       |\n| storage_policy | Policy-0                              |\n+----------------+---------------------------------------+\n
          "},{"location":"openstack/persistent-storage/object-storage/#ii-swift-interface","title":"ii. Swift Interface","text":"

          This is a python client for the Swift API. There's a Python API (the swiftclient module), and a command-line script (swift).

          • This example uses a Python3 virtual environment, but you are free to choose any other method to create a local virtual environment like Conda.
            python3 -m venv venv\n

          Choosing Correct Python Interpreter

          Make sure you are able to use python or python3 or py -3 (For Windows Only) to create a directory named venv (or whatever name you specified) in your current working directory.

          • Activate the virtual environment by running:

          on Linux/Mac: source venv/bin/activate

          on Windows: venv\\Scripts\\activate

          "},{"location":"openstack/persistent-storage/object-storage/#install-python-swift-client-page-at-pypi","title":"Install Python Swift Client page at PyPi","text":"
          • Once the virtual environment is activated, install python-swiftclient and python-keystoneclient:

          pip install python-swiftclient python-keystoneclient

          • Swift authenticates using a user, tenant, and key, which map to your OpenStack username, project, and password.

          For this, you need to download the \"NERC's OpenStack RC File\" with the credentials for your NERC project from the NERC's OpenStack dashboard. Then you need to source that RC file using: source *-openrc.sh. You can read here on how to do this.

          By sourcing the \"NERC's OpenStack RC File\", you will set all of the required environment variables.

          "},{"location":"openstack/persistent-storage/object-storage/#check-your-authentication-variables","title":"Check your authentication variables","text":"

          Check what the swift client will use as authentication variables:

          swift auth\n
          "},{"location":"openstack/persistent-storage/object-storage/#create-your-first-container","title":"Create your first container","text":"

          Let's create your first container by using the following command:

          swift post <container_name>\n

          For example:

          swift post unique-container-test\n
          "},{"location":"openstack/persistent-storage/object-storage/#upload-files","title":"Upload files","text":"

          Upload a file to your container:

          swift upload <container_name> <file_or_folder>\n

          To upload a file to the above listed i.e. unique-container-test, you can run the following command:

          swift upload unique-container-test ./README.md\n
          "},{"location":"openstack/persistent-storage/object-storage/#show-containers","title":"Show containers","text":"

          Then type the following command to get list of your containers:

          swift list\n

          This will output the existing containers in your project, e.g. unique-container-test.

          Show objects inside your container:

          swift list <container_name>\n

          For example:

          swift list unique-container-test\nREADME.md\n
          "},{"location":"openstack/persistent-storage/object-storage/#show-statistics-of-your-containers-and-objects","title":"Show statistics of your containers and objects","text":"

          You can see statistics ranging from specific objects to the entire account. Use the following command to see statistics for a specific container:

          swift stat <container_name>\n

          You can also use swift stat <container_name> <filename> to check stats of individual files.

          If you want to see stats from your whole account, you can type:

          swift stat\n
          "},{"location":"openstack/persistent-storage/object-storage/#download-objects","title":"Download objects","text":"

          You can download single objects by using the following command:

          swift download <container_name> <your_object> -o /path/to/local/<your_object>\n

          For example:

          swift download unique-container-test README.md -o ./README.md\nREADME.md [auth 2.763s, headers 2.907s, total 2.907s, 0.000 MB/s]\n

          It's also possible to test downloading an object/container without actually downloading it:

          swift download <container-name> --no-download\n
          "},{"location":"openstack/persistent-storage/object-storage/#download-all-objects-from-specific-container","title":"Download all objects from specific container","text":"
          swift download <container_name> -D </path/to/folder/>\n
          "},{"location":"openstack/persistent-storage/object-storage/#download-all-objects-from-your-account","title":"Download all objects from your account","text":"
          swift download --all -D </path/to/folder/>\n
          "},{"location":"openstack/persistent-storage/object-storage/#delete-objects","title":"Delete objects","text":"

          Delete specific object by issuing the following command:

          swift delete <container_name> <object_name>\n

          For example:

          swift delete unique-container-test README.md\nREADME.md\n

          And finally delete specific container by typing the following:

          swift delete <container_name>\n

          For example:

          swift delete unique-container-test\n

          Other helpful Swift commands:

          delete               Delete a container or objects within a container.\ndownload             Download objects from containers.\nlist                 Lists the containers for the account or the objects\n                    for a container.\npost                 Updates meta information for the account, container,\n                    or object; creates containers if not present.\ncopy                 Copies object, optionally adds meta\nstat                 Displays information for the account, container,\n                    or object.\nupload               Uploads files or directories to the given container.\ncapabilities         List cluster capabilities.\ntempurl              Create a temporary URL.\nauth                 Display auth related environment variables.\nbash_completion      Outputs option and flag cli data ready for\n                    bash_completion.\n

          Helpful Tip

          Type swift -h to learn more about using the swift commands. The client has a --debug flag, which can be useful if you are facing any issues.

          "},{"location":"openstack/persistent-storage/object-storage/#iii-using-aws-cli","title":"iii. Using AWS CLI","text":"

          The Ceph Object Gateway supports basic operations through the Amazon S3 interface.

          You can use both high-level (s3) commands with the AWS CLI and API-Level (s3api) commands with the AWS CLI to access object storage on your NERC project.

          Prerequisites:

          To run the s3 or s3api commands, you need to have:

          • AWS CLI installed, see Installing or updating the latest version of the AWS CLI for more information.

          • The NERC's Swift End Point URL: https://stack.nerc.mghpcc.org:13808

          Understand these Amazon S3 terms

          i. Bucket - A top-level Amazon S3 folder.

          ii. Prefix - An Amazon S3 folder in a bucket.

          iii. Object - Any item that's hosted in an Amazon S3 bucket.
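
          For example, these terms map onto a hypothetical S3 path as follows:

          # s3://mybucket/myfolder/myobject.txt\n#   Bucket: mybucket\n#   Prefix: myfolder\n#   Object: myobject.txt\n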

          "},{"location":"openstack/persistent-storage/object-storage/#configuring-the-aws-cli","title":"Configuring the AWS CLI","text":"

          To access this interface, you must login through the OpenStack Dashboard and navigate to \"Projects > API Access\" where you can download the \"Download OpenStack RC File\" as well as the \"EC2 Credentials\".

          Clicking on \"EC2 Credentials\" will download a zip file including an ec2rc.sh file that has content similar to what is shown below. The important parts are EC2_ACCESS_KEY and EC2_SECRET_KEY; keep them noted.

          #!/bin/bash\n\nNOVARC=$(readlink -f \"${BASH_SOURCE:-${0}}\" 2>/dev/null) || NOVARC=$(python -c 'import os,sys; print os.path.abspath(os.path.realpath(sys.argv[1]))' \"${BASH_SOURCE:-${0}}\")\nNOVA_KEY_DIR=${NOVARC%/*}\nexport EC2_ACCESS_KEY=...\nexport EC2_SECRET_KEY=...\nexport EC2_URL=https://localhost/notimplemented\nexport EC2_USER_ID=42 # nova does not use user id, but bundling requires it\nexport EC2_PRIVATE_KEY=${NOVA_KEY_DIR}/pk.pem\nexport EC2_CERT=${NOVA_KEY_DIR}/cert.pem\nexport NOVA_CERT=${NOVA_KEY_DIR}/cacert.pem\nexport EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set\n\nalias ec2-bundle-image=\"ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}\"\nalias ec2-upload-bundle=\"ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}\"\n

          Alternatively, you can obtain your EC2 access keys using the openstack client:

          sudo apt install python3-openstackclient\n\nopenstack ec2 credentials list\n+------------------+------------------+--------------+-----------+\n| Access           | Secret           | Project ID   | User ID   |\n+------------------+------------------+--------------+-----------+\n| <EC2_ACCESS_KEY> | <EC2_SECRET_KEY> | <Project_ID> | <User_ID> |\n+------------------+------------------+--------------+-----------+\n

          OR, you can even create a new one by running:

          openstack ec2 credentials create\n
          • Source the downloaded OpenStack RC File from Projects > API Access by using the source *-openrc.sh command. Sourcing the RC File will set the required environment variables.

          Then run the aws configure command, which requires the EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from the ec2rc.sh file (during the \"Configuring the AWS CLI\" step):

            $> aws configure --profile \"'${OS_PROJECT_NAME}'\"\n  AWS Access Key ID [None]: <EC2_ACCESS_KEY>\n  AWS Secret Access Key [None]: <EC2_SECRET_KEY>\n  Default region name [None]:\n  Default output format [None]:\n

          This will create the AWS CLI configuration file ~/.aws/config in your home directory, with the EC2 profile based on your ${OS_PROJECT_NAME}, and the ~/.aws/credentials file with the Access and Secret keys that you provided above.

          The EC2 profile is stored here:

            cat ~/.aws/config\n\n  [profile ''\"'\"'${OS_PROJECT_NAME}'\"'\"'']\n

          Whereas the credentials are stored here:

            cat ~/.aws/credentials\n\n  ['${OS_PROJECT_NAME}']\n  aws_access_key_id = <EC2_ACCESS_KEY>\n  aws_secret_access_key = <EC2_SECRET_KEY>\n

          Alternatively, you can manually create the AWS CLI configuration file ~/.aws/config in your home directory with the EC2 profile and credentials as shown below:

            cat ~/.aws/config\n\n  ['${OS_PROJECT_NAME}']\n  aws_access_key_id = <EC2_ACCESS_KEY>\n  aws_secret_access_key = <EC2_SECRET_KEY>\n

          Information

          The profile that you use must have the permissions required to perform the AWS operations.

          "},{"location":"openstack/persistent-storage/object-storage/#listing-buckets-using-aws-cli","title":"Listing buckets using aws-cli","text":"

          i. Using s3api:

          aws --profile \"'${OS_PROJECT_NAME}'\" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \\\n    s3api list-buckets\n\n{\n    \"Buckets\": [\n        {\n            \"Name\": \"unique-container-test\",\n            \"CreationDate\": \"2009-02-03T16:45:09+00:00\"\n        }\n    ],\n    \"Owner\": {\n        \"DisplayName\": \"Test Project-f69dcff:mmunakami@fas.harvard.edu\",\n        \"ID\": \"Test Project-f69dcff:mmunakami@fas.harvard.edu\"\n    }\n}\n

          ii. Alternatively, you can do the same using s3:

          aws --profile \"'${OS_PROJECT_NAME}'\" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \\\n    s3 ls\n

          Output:

          2009-02-03 11:45:09 unique-container-test\n
          "},{"location":"openstack/persistent-storage/object-storage/#to-list-contents-inside-bucket","title":"To list contents inside bucket","text":"
          aws --profile \"'${OS_PROJECT_NAME}'\" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \\\n    s3 ls s3://<your-bucket>\n
          "},{"location":"openstack/persistent-storage/object-storage/#to-make-a-bucket","title":"To make a bucket","text":"
          aws --profile \"'${OS_PROJECT_NAME}'\" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \\\n    s3 mb s3://<your-bucket>\n
          "},{"location":"openstack/persistent-storage/object-storage/#adding-copying-files-from-one-container-to-another-container","title":"Adding/ Copying files from one container to another container","text":"
          1. Single file copy using cp command:

            The aws tool provides a cp command to copy files to your s3 bucket:

            aws --profile \"'${OS_PROJECT_NAME}'\" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \\\n    s3 cp <Your-file> s3://<your-bucket>/\n

            Output:

            upload: .\\<Your-file> to s3://<your-bucket>/<Your-file>\n
          2. Whole directory copy using the --recursive flag:

            aws --profile \"'${OS_PROJECT_NAME}'\" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \\\n    s3 cp <Your-directory> s3://<your-bucket>/ --recursive\n

            Output:

            upload: <your-directory>/<file0> to s3://<your-bucket>/<file0>\nupload: <your-directory>/<file1> to s3://<your-bucket>/<file1>\n...\nupload: <your-directory>/<fileN> to s3://<your-bucket>/<fileN>\n

          You can then use aws s3 ls to check that your files have been properly uploaded:

          aws --profile \"'${OS_PROJECT_NAME}'\" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \\\n    s3 ls s3://<your-bucket>/\n

          Output:

          2022-04-04 16:32:38          <size> <file0>\n2022-04-04 16:32:38          <size> <file1>\n...\n2022-04-04 16:25:50          <size> <fileN>\n

          Other Useful Flags

          Additionally, aws s3 cp provides an --exclude flag to filter out files that should not be transferred; the syntax is --exclude \"<regex>\".
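
          For example (an illustrative pattern only), to skip log files during a recursive copy:

          aws --profile \"'${OS_PROJECT_NAME}'\" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \\\n    s3 cp <Your-directory> s3://<your-bucket>/ --recursive --exclude \"*.log\"\n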

          "},{"location":"openstack/persistent-storage/object-storage/#to-delete-an-object-from-a-bucket","title":"To delete an object from a bucket","text":"
          aws --profile \"'${OS_PROJECT_NAME}'\" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \\\n    s3 rm s3://<your-bucket>/argparse-1.2.1.tar.gz\n
          "},{"location":"openstack/persistent-storage/object-storage/#to-remove-a-bucket","title":"To remove a bucket","text":"
          aws --profile \"'${OS_PROJECT_NAME}'\" --endpoint-url=https://stack.nerc.mghpcc.org:13808 \\\n    s3 rb s3://<your-bucket>\n
          "},{"location":"openstack/persistent-storage/object-storage/#iv-using-s3cmd","title":"iv. Using s3cmd","text":"

          S3cmd is a free command-line tool and client for uploading, retrieving and managing data in Amazon S3 and other cloud storage service providers that use the S3 protocol.

          Prerequisites:

          • S3cmd installed, see Download and Install the latest version of the S3cmd for more information.
          "},{"location":"openstack/persistent-storage/object-storage/#configuring-s3cmd","title":"Configuring s3cmd","text":"

The EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from the ec2rc.sh file can then be plugged into the .s3cfg config file.

          The .s3cfg file requires the following configuration to work with our Object storage service:

          # Setup endpoint\nhost_base = stack.nerc.mghpcc.org:13808\nhost_bucket = stack.nerc.mghpcc.org:13808\nuse_https = True\n\n# Setup access keys\naccess_key = <YOUR_EC2_ACCESS_KEY_FROM_ec2rc_FILE>\nsecret_key = <YOUR_EC2_SECRET_KEY_FROM_ec2rc_FILE>\n\n# Enable S3 v4 signature APIs\nsignature_v2 = False\n

We are assuming that the configuration file is placed in the default location, i.e. $HOME/.s3cfg. If that is not the case, you need to add the --config=FILE parameter with the location of your configuration file to override the default config location.

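For example, assuming you saved the configuration to a custom path such as $HOME/nerc.s3cfg (a hypothetical filename), a sketch of overriding the config location looks like:

s3cmd --config=$HOME/nerc.s3cfg ls\n
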
          "},{"location":"openstack/persistent-storage/object-storage/#using-s3cmd","title":"Using s3cmd","text":""},{"location":"openstack/persistent-storage/object-storage/#to-list-buckets","title":"To list buckets","text":"

          Use the following command to list all s3 buckets

          s3cmd ls\n

          Or,

          s3cmd ls s3://\n\n2009-02-03 16:45  s3://nerc-test-container\n2009-02-03 16:45  s3://second-mycontainer\n2009-02-03 16:45  s3://unique-container-test\n
          "},{"location":"openstack/persistent-storage/object-storage/#create-a-new-bucket","title":"Create a new bucket","text":"

          In order to create a bucket, you can use s3cmd with the following command

          s3cmd mb s3://mybucket\n\nBucket 's3://mybucket/' created\n\ns3cmd ls\n2009-02-03 16:45  s3://mybucket\n\n2009-02-03 16:45  s3://nerc-test-container\n2009-02-03 16:45  s3://second-mycontainer\n2009-02-03 16:45  s3://unique-container-test\n
          "},{"location":"openstack/persistent-storage/object-storage/#to-copy-an-object-to-bucket","title":"To copy an object to bucket","text":"

The following command uploads the file file.txt to the bucket using the s3cmd command.

          s3cmd put ~/file.txt s3://mybucket/\n\nupload: 'file.txt' -> 's3://mybucket/file.txt'  [1 of 1]\n0 of 0     0% in    0s     0.00 B/s  done\n

s3cmd also allows you to set additional properties on the stored objects. In the example below, we set the content type with the --mime-type option and the cache-control parameter to 1 hour with --add-header.

          s3cmd put --mime-type='application/json' --add-header='Cache-Control: max-age=3600' ~/file.txt s3://mybucket\n
          "},{"location":"openstack/persistent-storage/object-storage/#uploading-directory-in-bucket","title":"Uploading Directory in bucket","text":"

If you need to upload an entire directory, use -r to upload it recursively as shown below.

          s3cmd put -r <your-directory> s3://mybucket/\n\nupload: 'backup/hello.txt' -> 's3://mybucket/backup/hello.txt'  [1 of 1]\n0 of 0     0% in    0s     0.00 B/s  done\n
          "},{"location":"openstack/persistent-storage/object-storage/#list-the-objects-of-bucket","title":"List the objects of bucket","text":"

List the objects in the bucket using the ls switch with s3cmd.

          s3cmd ls s3://mybucket/\n\n                       DIR   s3://mybucket/backup/\n2022-04-05 03:10         0   s3://mybucket/file.txt\n2022-04-05 03:14         0   s3://mybucket/hello.txt\n
          "},{"location":"openstack/persistent-storage/object-storage/#to-copy-download-an-object-to-local-system","title":"To copy/ download an object to local system","text":"

          Use the following command to download files from the bucket:

          s3cmd get s3://mybucket/file.txt\n\ndownload: 's3://mybucket/file.txt' -> './file.txt'  [1 of 1]\n0 of 0     0% in    0s     0.00 B/s  done\n
          "},{"location":"openstack/persistent-storage/object-storage/#to-sync-local-filedirectory-to-a-bucket","title":"To sync local file/directory to a bucket","text":"
          s3cmd sync newdemo s3://mybucket\n\nupload: 'newdemo/newdemo_file.txt' -> 's3://mybucket/newdemo/newdemo_file.txt'  [1 of 1]\n0 of 0     0% in    0s     0.00 B/s  done\n
          "},{"location":"openstack/persistent-storage/object-storage/#to-sync-bucket-or-object-with-local-filesystem","title":"To sync bucket or object with local filesystem","text":"
          s3cmd sync  s3://unique-container-test otherlocalbucket\n\ndownload: 's3://unique-container-test/README.md' -> 'otherlocalbucket/README.md'  [1 of 3]\n653 of 653   100% in    0s     4.54 kB/s  done\ndownload: 's3://unique-container-test/image.png' -> 'otherlocalbucket/image.png'  [2 of 3]\n0 of 0     0% in    0s     0.00 B/s  done\ndownload: 's3://unique-container-test/test-file' -> 'otherlocalbucket/test-file'  [3 of 3]\n12 of 12   100% in    0s    83.83 B/s  done\nDone. Downloaded 665 bytes in 1.0 seconds, 665.00 B/s.\n
          "},{"location":"openstack/persistent-storage/object-storage/#to-delete-an-object-from-bucket","title":"To delete an object from bucket","text":"

          You can delete files from the bucket with the following s3cmd command

          s3cmd del s3://unique-container-test/README.md\n\ndelete: 's3://unique-container-test/README.md'\n
          "},{"location":"openstack/persistent-storage/object-storage/#to-delete-directory-from-bucket","title":"To delete directory from bucket","text":"
          s3cmd del s3://mybucket/newdemo\n\ndelete: 's3://mybucket/newdemo'\n
          "},{"location":"openstack/persistent-storage/object-storage/#to-delete-a-bucket","title":"To delete a bucket","text":"
          s3cmd rb s3://mybucket\n\nERROR: S3 error: 409 (BucketNotEmpty): The bucket you tried to delete is not empty\n

          Important Information

The above command failed because the bucket was not empty! You can remove all objects inside the bucket and then use the command again. Or, you can run the command with the -r or --recursive flag, i.e. s3cmd rb s3://mybucket -r or s3cmd rb s3://mybucket --recursive.

          "},{"location":"openstack/persistent-storage/object-storage/#v-using-rclone","title":"v. Using Rclone","text":"

          rclone is a convenient and performant command-line tool for transferring files and synchronizing directories directly between your local file systems and the NERC's containers.

          Prerequisites:

          To run the rclone commands, you need to have:

          • rclone installed, see Downloading and Installing the latest version of the Rclone for more information.
          "},{"location":"openstack/persistent-storage/object-storage/#configuring-rclone","title":"Configuring Rclone","text":"

First, you'll need to configure rclone. Because object storage systems have quite complicated authentication, the credentials are kept in a config file.

If you run rclone config file, it will show you where your default config file is located.

          Note

For Windows users, you may need to specify the full path to the Rclone executable file if it's not included in your system's PATH variable.

The EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from the ec2rc.sh file can then be plugged into the rclone config file.

Edit the config file at the location reported by the rclone config file command and add the following entry with the name [nerc]:

          [nerc]\ntype = s3\nenv_auth = false\nprovider = Other\nendpoint = https://stack.nerc.mghpcc.org:13808\nacl = public-read\naccess_key_id = <YOUR_EC2_ACCESS_KEY_FROM_ec2rc_FILE>\nsecret_access_key = <YOUR_EC2_SECRET_KEY_FROM_ec2rc_FILE>\nlocation_constraint =\nserver_side_encryption =\n

          More about the config for AWS S3 compatible API can be seen here.

          Important Information

Note that if you set env_auth = true, rclone will read the credentials from environment variables, so you should not insert them into the config file in that case.

Or, you can copy this content locally into a new config file and then use the --config flag to override the config location, e.g. rclone --config=FILE.

          Interactive Configuration

Run rclone config to set it up interactively. See the rclone config docs for more details.

          "},{"location":"openstack/persistent-storage/object-storage/#using-rclone","title":"Using Rclone","text":"

rclone supports many subcommands (see the complete list of Rclone subcommands). A few commonly used subcommands are shown below (assuming you configured the NERC Object Storage as nerc):

          "},{"location":"openstack/persistent-storage/object-storage/#listing-the-containers-and-contains-of-a-container","title":"Listing the Containers and Contains of a Container","text":"

Once your Object Storage has been configured in Rclone, you can use the Rclone interface to list all the Containers with the \"lsd\" command:

          rclone lsd \"nerc:\"\n

          Or,

          rclone lsd \"nerc:\" --config=rclone.conf\n

          For e.g.,

          rclone lsd \"nerc:\" --config=rclone.conf\n        -1 2009-02-03 11:45:09        -1 second-mycontainer\n        -1 2009-02-03 11:45:09        -1 unique-container-test\n

To list the files and folders available within a container, i.e. \"unique-container-test\" in this case, we can use the \"ls\" command:

          rclone ls \"nerc:unique-container-test/\"\n  653 README.md\n    0 image.png\n   12 test-file\n
          "},{"location":"openstack/persistent-storage/object-storage/#uploading-and-downloading-files-and-folders","title":"Uploading and Downloading Files and Folders","text":"

rclone supports a variety of options to allow you to Copy, Sync and Move files from one destination to another.

          A simple example of this can be seen below, where we copy (Upload) the file \"upload.me\" to the <your-bucket> container:

          rclone copy \"./upload.me\" \"nerc:<your-bucket>/\"\n

          Another example, to copy (Download) the file \"upload.me\" from the <your-bucket> container to your local:

          rclone -P copy \"nerc:<your-bucket>/upload.me\" \"./\"\n

Also, to Sync files into the <your-bucket> container, try it with --dry-run first:

          rclone --dry-run sync /path/to/files nerc:<your-bucket>\n

          Then sync for real

          rclone sync /path/to/files nerc:<your-bucket>\n
          "},{"location":"openstack/persistent-storage/object-storage/#mounting-object-storage-on-local-filesystem","title":"Mounting object storage on local filesystem","text":"

          Linux:

          First, you need to create a directory on which you will mount your filesystem:

          mkdir ~/mnt-rclone

          Then you can simply mount your object storage with:

          rclone -vv --vfs-cache-mode writes mount nerc: ~/mnt-rclone

          More about using Rclone

          You can read more about Rclone Mounting here.

          Windows:

          First you have to download Winfsp:

          WinFsp is an open source Windows File System Proxy which provides a FUSE emulation layer.

          Then you can simply mount your object storage with (no need to create the directory in advance):

          rclone -vv --vfs-cache-mode writes mount nerc: C:/mnt-rclone

The vfs-cache-mode flag enables file caching; you can use either the writes or full option. For further explanation, you can see the official documentation.

          Now that your object storage is mounted, you can list, create and delete files in it.

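For example, a minimal sketch using the Linux mount point created above (ordinary shell commands; <your-bucket> stands for one of your existing containers):

ls ~/mnt-rclone\ncp ./upload.me ~/mnt-rclone/<your-bucket>/\nls ~/mnt-rclone/<your-bucket>\n
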
          "},{"location":"openstack/persistent-storage/object-storage/#unmount-object-storage","title":"Unmount object storage","text":"

          To unmount, simply press CTRL-C and the mount will be interrupted.

          "},{"location":"openstack/persistent-storage/object-storage/#vi-using-client-python-libraries","title":"vi. Using client (Python) libraries","text":"

a. The EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from the ec2rc.sh file can then be plugged into your application. See the example below using the Python Boto3 library, which connects to the S3 API interface using EC2 credentials and performs some basic operations on the buckets and files that the user has access to.

          import boto3\n\n# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#bucket\ns3 = boto3.resource('s3',\n    aws_access_key_id='YOUR_EC2_ACCESS_KEY_FROM_ec2rc_FILE',\n    aws_secret_access_key='YOUR_EC2_SECRET_KEY_FROM_ec2rc_FILE', #pragma: allowlist secret\n    endpoint_url='https://stack.nerc.mghpcc.org:13808',\n)\n\n# List all containers\nfor bucket in s3.buckets.all():\n    print(' ->', bucket)\n\n# List all objects in a container i.e. unique-container-test is your current Container\nbucket = s3.Bucket('unique-container-test')\nfor obj in bucket.objects.all():\n    print(' ->', obj)\n\n# Download an S3 object i.e. test-file a file available in your unique-container-test Container\ns3.Bucket('unique-container-test').download_file('test-file', './test-file.txt')\n\n# Add an image to the bucket\n# bucket.put_object(Body=open('image.png', mode='rb'), Key='image.png')\n

We can also configure the Python Boto3 library to work with the saved aws profile.

          import boto3\n\n# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html\nsession = boto3.Session(profile_name='<YOUR_CONFIGURED_AWS_PROFILE_NAME>')\n\n# List all containers\ns3 = boto3.client('s3', endpoint_url='https://stack.nerc.mghpcc.org:13808',)\nresponse = s3.list_buckets()\n\nfor bucket in response['Buckets']:\n    print(' ->', bucket)\n

b. The EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from the ec2rc.sh file can then be plugged into your application. See the example below using the Python Minio library, which connects to the S3 API interface using EC2 credentials and performs some basic operations on the buckets and files that the user has access to.

          from minio import Minio\n\n# Create client with access key and secret key.\n# https://docs.min.io/docs/python-client-api-reference.html\nclient = Minio(\n    \"stack.nerc.mghpcc.org:13808\",\n    access_key='YOUR_EC2_ACCESS_KEY_FROM_ec2rc_FILE',\n    secret_key='YOUR_EC2_SECRET_KEY_FROM_ec2rc_FILE', #pragma: allowlist secret\n)\n\n# List all containers\nbuckets = client.list_buckets()\nfor bucket in buckets:\n    # print(bucket.name, bucket.creation_date)\n    print(' ->', bucket)\n\n# Make 'nerc-test-container' container if not exist.\nfound = client.bucket_exists(\"nerc-test-container\")\nif not found:\n    client.make_bucket(\"nerc-test-container\")\nelse:\n    print(\"Bucket 'nerc-test-container' already exists\")\n\n# Upload './nerc-backup.zip' as object name 'nerc-backup-2022.zip'\n# to bucket 'nerc-test-container'.\nclient.fput_object(\n    \"nerc-test-container\", \"nerc-backup-2022.zip\", \"./nerc-backup.zip\",\n)\n
          "},{"location":"openstack/persistent-storage/object-storage/#3-using-graphical-user-interface-gui-tools","title":"3. Using Graphical User Interface (GUI) Tools","text":""},{"location":"openstack/persistent-storage/object-storage/#i-using-winscp","title":"i. Using WinSCP","text":"

          WinSCP is a popular and free open-source SFTP client, SCP client, and FTP client for Windows. Its main function is file transfer between a local and a remote computer, with some basic file management functionality using FTP, FTPS, SCP, SFTP, WebDAV or S3 file transfer protocols.

          Prerequisites:

          • WinSCP installed, see Download and Install the latest version of the WinSCP for more information.

          • Go to WinSCP menu and open \"Options > Preferences\".

          • When the \"Preferences\" dialog window appears, select \"Transfer\" in the options on the left pane.

          • Click on \"Edit\" button.

• Then, in the popup dialog box, review the \"Common options\" group and uncheck the \"Preserve timestamp\" option as shown below:

          "},{"location":"openstack/persistent-storage/object-storage/#configuring-winscp","title":"Configuring WinSCP","text":"
          • Click on \"New Session\" tab button as shown below:
          • Select \"Amazon S3\" from the \"File protocol\" dropdown options as shown below:
          • Provide the following required endpoint information:

          \"Host name\": \"stack.nerc.mghpcc.org\"

          \"Port number\": \"13808\"

          The EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from ec2rc.sh file can then be plugged into \"Access key ID\" and \"Secret access key\" respectively.

          Helpful Tips

You can save the session configured above under a preferred name by clicking the \"Save\" button and giving your session a proper name, so that next time you don't need to manually enter all of your configuration again.

          "},{"location":"openstack/persistent-storage/object-storage/#using-winscp","title":"Using WinSCP","text":"

You can follow the steps above to manually add a new session the next time you open WinSCP, or you can connect to a previously saved session (the popup dialog will list all of your saved session names) by simply clicking on the session name.

          Then click \"Login\" button to connect to your NERC project's Object Storage as shown below:

          "},{"location":"openstack/persistent-storage/object-storage/#ii-using-cyberduck","title":"ii. Using Cyberduck","text":"

Cyberduck is a libre server and cloud storage browser for Mac and Windows. With an easy-to-use interface, it connects to servers, enterprise file sharing, and cloud storage.

          Prerequisites:

          • Cyberduck installed, see Download and Install the latest version of the Cyberduck for more information.
          "},{"location":"openstack/persistent-storage/object-storage/#configuring-cyberduck","title":"Configuring Cyberduck","text":"
          • Click on \"Open Connection\" tab button as shown below:
          • Select \"Amazon S3\" from the dropdown options as shown below:
          • Provide the following required endpoint information:

          \"Server\": \"stack.nerc.mghpcc.org\"

          \"Port\": \"13808\"

          The EC2_ACCESS_KEY and EC2_SECRET_KEY keys that you noted from ec2rc.sh file can then be plugged into \"Access key ID\" and \"Secret Access Key\" respectively

          "},{"location":"openstack/persistent-storage/object-storage/#using-cyberduck","title":"Using Cyberduck","text":"

          Then click \"Connect\" button to connect to your NERC project's Object Storage as shown below:

          "},{"location":"openstack/persistent-storage/transfer-a-volume/","title":"Transfer A Volume","text":"

          You may wish to transfer a volume to a different project. Volumes are specific to a project and can only be attached to one virtual machine at a time.

          Important

The volume to be transferred must not be attached to an instance. This can be verified by looking at the \"Status\" column of the volume, i.e. it needs to be \"Available\" instead of \"In-use\", and the \"Attached To\" column needs to be empty.

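If you prefer the CLI, a quick sketch of this check (assuming the OpenStack client is configured for the source project) is:

openstack volume show <VOLUME_NAME_OR_ID> -c status -c attachments\n
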
          "},{"location":"openstack/persistent-storage/transfer-a-volume/#using-horizon-dashboard","title":"Using Horizon dashboard","text":"

          Once you're logged in to NERC's Horizon dashboard.

          Navigate to Project -> Volumes -> Volumes.

Select the volume that you want to transfer, then click the dropdown next to \"Edit Volume\" and choose \"Create Transfer\".

          Give the transfer a name.

          You will see a screen like shown below. Be sure to capture the Transfer ID and the Authorization Key.

          Important Note

          You can always get the transfer ID later if needed, but there is no way to retrieve the key.

          If the key is lost before the transfer is completed, you will have to cancel the pending transfer and create a new one.

          Then the volume will show the status like below:

          Assuming you have access to the receiving project, switch to it using the Project dropdown at the top right.

          If you don't have access to the receiving project, give the transfer ID and Authorization Key to a collaborator who does, and have them complete the next steps.

          In the receiving project, go to the Volumes tab, and click \"Accept Transfer\" button as shown below:

          Enter the \"Transfer ID\" and the \"Authorization Key\" that were captured when the transfer was created in the previous project.

          The volume should now appear in the Volumes list of the receiving project as shown below:

          Important Note

          Any pending transfers can be cancelled if they are not yet accepted, but there is no way to \"undo\" a transfer once it is complete. To send the volume back to the original project, a new transfer would be required.

          "},{"location":"openstack/persistent-storage/transfer-a-volume/#using-the-cli","title":"Using the CLI","text":"

          Prerequisites:

          To run the OpenStack CLI commands, you need to have:

          • OpenStack CLI setup, see OpenStack Command Line setup for more information.
          "},{"location":"openstack/persistent-storage/transfer-a-volume/#using-the-openstack-client","title":"Using the openstack client","text":"
          • Identifying volume to transfer in your source project

openstack volume list\n+---------------------------+-----------+-----------+------+-------------+\n| ID                        | Name      | Status    | Size | Attached to |\n+---------------------------+-----------+-----------+------+-------------+\n| d8a5da4c-...-8b6678ce4936 | my-volume | available |  100 |             |\n+---------------------------+-----------+-----------+------+-------------+\n

          • Create the transfer request

openstack volume transfer request create my-volume\n+------------+--------------------------------------+\n| Field      | Value                                |\n+------------+--------------------------------------+\n| auth_key   | b92d98fec2766582                     |\n| created_at | 2024-02-04T14:30:08.362907           |\n| id         | a16494cf-cfa0-47f6-b606-62573357922a |\n| name       | None                                 |\n| volume_id  | d8a5da4c-41c8-4c2d-b57a-8b6678ce4936 |\n+------------+--------------------------------------+\n

          Pro Tip

          If your volume name includes spaces, you need to enclose them in quotes, i.e. \"<VOLUME_NAME_OR_ID>\". For example: openstack volume transfer request create \"My Volume\"

• The transfer status of the volume can be checked using openstack volume transfer request list as follows, and the volume shows the status awaiting-transfer when running openstack volume show <VOLUME_NAME_OR_ID> as shown below:

openstack volume transfer request list\n+---------------------------+------+--------------------------------------+\n| ID                        | Name | Volume                               |\n+---------------------------+------+--------------------------------------+\n| a16494cf-...-62573357922a | None | d8a5da4c-41c8-4c2d-b57a-8b6678ce4936 |\n+---------------------------+------+--------------------------------------+\n

openstack volume show my-volume\n+------------------------------+--------------------------------------+\n| Field                        | Value                                |\n+------------------------------+--------------------------------------+\n...\n| name                         | my-volume                            |\n...\n| status                       | awaiting-transfer                    |\n+------------------------------+--------------------------------------+\n

• The user of the destination project can authenticate and, after receiving the authorization key reported above, accept the transfer as shown below:

openstack volume transfer request accept --auth-key b92d98fec2766582 a16494cf-cfa0-47f6-b606-62573357922a\n+-----------+--------------------------------------+\n| Field     | Value                                |\n+-----------+--------------------------------------+\n| id        | a16494cf-cfa0-47f6-b606-62573357922a |\n| name      | None                                 |\n| volume_id | d8a5da4c-41c8-4c2d-b57a-8b6678ce4936 |\n+-----------+--------------------------------------+\n

• The result can then be confirmed in the volume list of the destination project.

openstack volume list\n+---------------------------+-----------+-----------+------+-------------+\n| ID                        | Name      | Status    | Size | Attached to |\n+---------------------------+-----------+-----------+------+-------------+\n| d8a5da4c-...-8b6678ce4936 | my-volume | available |  100 |             |\n+---------------------------+-----------+-----------+------+-------------+\n

          "},{"location":"openstack/persistent-storage/volumes/","title":"Persistent Storage","text":""},{"location":"openstack/persistent-storage/volumes/#ephemeral-disk","title":"Ephemeral disk","text":"

          OpenStack offers two types of block storage: ephemeral storage and persistent volumes. Ephemeral storage is available only during the instance's lifespan, persisting across guest operating system reboots. However, once the instance is deleted, its associated storage is also removed. The size of ephemeral storage is determined by the virtual machine's flavor and remains constant for all virtual machines of that flavor. The service level for ephemeral storage relies on the underlying hardware.

          In its default configuration, when the instance is launched from an Image or an Instance Snapshot, the choice for utilizing persistent storage is configured by selecting the Yes option for \"Create New Volume\". Additionally, the \"Delete Volume on Instance Delete\" setting is pre-set to No as shown below:

          If you set the \"Create New Volume\" option to No, the instance will boot from either an image or a snapshot, with the instance only being attached to an ephemeral disk. It's crucial to note that this configuration does NOT create persistent block storage in the form of a Volume, which can pose risks. Consequently, the disk of the instance won't appear in the \"Volumes\" list. To mitigate potential data loss, we strongly recommend regularly taking a snapshot of such a running ephemeral instance, referred to as an \"instance snapshot\", especially if you want to safeguard or recover important states of your instance.

          Very Important Note

          Never use Ephemeral disk if you're setting up a production-level environment. When the instance is deleted, its associated ephemeral storage is also removed.

          "},{"location":"openstack/persistent-storage/volumes/#volumes","title":"Volumes","text":"

          A volume is a detachable block storage device, similar to a USB hard drive. You can attach a volume to only one instance.

          Unlike Ephemeral disk, Volumes are the Block Storage devices that you attach to instances to enable persistent storage. Users can attach a volume to a running instance or detach a volume and attach it to another instance at any time.

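For example, attaching and detaching a volume can be done from the Horizon dashboard or with the OpenStack CLI; a minimal sketch (assuming an instance named my-instance and a volume named my-volume, both hypothetical names) looks like:

openstack server add volume my-instance my-volume\nopenstack server remove volume my-instance my-volume\n
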
          Ownership of volumes can be transferred to another project by transferring it to another project as described here.

          Some uses for volumes:

          • Persistent data storage for ephemeral instances.

          • Transfer of data between projects

          • Bootable image where disk changes persist

          • Mounting the disk of one instance to another for troubleshooting

          "},{"location":"openstack/persistent-storage/volumes/#how-do-you-make-your-vm-setup-and-data-persistent","title":"How do you make your VM setup and data persistent?","text":"
          • By default, when the instance is launched from an Image or an Instance Snapshot, the choice for utilizing persistent storage is configured by selecting the Yes option for \"Create New Volume\". It's crucial to note that this configuration automatically creates persistent block storage in the form of a Volume instead of using Ephemeral disk, which appears in the \"Volumes\" list in the Horizon dashboard: Project -> Volumes -> Volumes.
          • By default, the setting for \"Delete Volume on Instance Delete\" is configured to use No. This setting ensures that the volume created during the launch of a virtual machine remains persistent and won't be deleted alongside the instance unless explicitly chosen as \"Yes\". Such instances boot from a bootable volume, utilizing an existing volume listed in the Project -> Volumes -> Volumes menu.

          To minimize the risk of potential data loss, we highly recommend consistently creating backups through snapshots. You can opt for a \"volume snapshot\" if you only need to capture the volume's data. However, if your VM involves extended running processes and vital in-memory data, preserving the precise VM state is essential. In such cases, we recommend regularly taking a snapshot of the entire instance, known as an \"instance snapshot\", provided you have sufficient Volume Storage quotas, specifically the \"OpenStack Volume Quota (GiB)\" allocated for your resource allocation. Please ensure that your allocation includes sufficient quota for the \"OpenStack Number of Volumes Quota\" to allow for the creation of additional volumes based on your quota attributes. Utilizing snapshots for backups is of utmost importance, particularly when safeguarding or recovering critical states and data from your instance.

          Very Important: Requested/Approved Allocated Storage Quota and Cost

          When you delete virtual machines backed by persistent volumes, the disk data is retained, continuing to consume approved storage resources for which you will still be billed. It's important to note that the Storage quotas for NERC (OpenStack) Resource Allocations, are specified by the \"OpenStack Volume Quota (GiB)\" and \"OpenStack Swift Quota (GiB)\" allocation attributes. Storage cost is determined by your requested and approved allocation values to reserve storage from the total NESE storage pool.

          If you request additional storage by specifying a changed quota value for the \"OpenStack Volume Quota (GiB)\" and \"OpenStack Swift Quota (GiB)\" allocation attributes through NERC's ColdFront interface, invoicing for the extra storage will take place upon fulfillment or approval of your request, as explained in our Billing FAQs.

          Conversely, if you request a reduction in the Storage quotas by specifying a reduced quota value for the \"OpenStack Volume Quota (GiB)\" and \"OpenStack Swift Quota in Gigabytes\" allocation attributes through a change request using ColdFront, your invoicing will be adjusted accordingly when the request is submitted.

          In both scenarios, 'invoicing' refers to the accumulation of hours corresponding to the added or removed storage quantity.

          Help Regarding Billing

          Please send your questions or concerns regarding Storage and Cost by emailing us at help@nerc.mghpcc.org or, by submitting a new ticket at the NERC's Support Ticketing System.

          "},{"location":"other-tools/","title":"Kubernetes","text":"
          • Kubernetes Overview

• K8s Flavors Comparison

          "},{"location":"other-tools/#i-kubernetes-development-environment","title":"i. Kubernetes Development environment","text":"
          1. Minikube

          2. Kind

          3. MicroK8s

          4. K3s

4.a. K3s with High Availability (HA) setup

            4.b. Multi-master HA K3s cluster using k3sup

            4.c. Single-Node K3s Cluster using k3d

            4.d. Multi-master K3s cluster setup using k3d

          5. k0s

          "},{"location":"other-tools/#ii-kubernetes-production-environment","title":"ii. Kubernetes Production environment","text":"
          1. Kubeadm

            1.a. Bootstrapping cluster with kubeadm

            1.b. Creating a HA cluster with kubeadm

          2. Kubespray

          "},{"location":"other-tools/#ci-cd-tools","title":"CI/ CD Tools","text":"
          • CI/CD Overview

          • Using Jenkins

            • Setup Jenkins CI/CD Pipeline

            • GitHub to Jenkins Pipeline

          • Using Github Actions

            • GitHub Actions CI/CD Pipeline
          "},{"location":"other-tools/#apache-spark","title":"Apache Spark","text":"
          • Apache Spark
          "},{"location":"other-tools/CI-CD/CI-CD-pipeline/","title":"What is Continuous Integration/Continuous Delivery (CI/CD) Pipeline?","text":"

A Continuous Integration/Continuous Delivery (CI/CD) pipeline involves a series of steps that are performed in order to deliver a new version of an application. CI/CD pipelines are a practice focused on improving software delivery using automation.

          "},{"location":"other-tools/CI-CD/CI-CD-pipeline/#components-of-a-cicd-pipeline","title":"Components of a CI/CD pipeline","text":"

          The steps that form a CI/CD pipeline are distinct subsets of tasks that are grouped into a pipeline stage. Typical pipeline stages include:

          • Build - The stage where the application is compiled.

          • Test - The stage where code is tested. Automation here can save both time and effort.

          • Release - The stage where the application is delivered to the central repository.

          • Deploy - In this stage code is deployed to production environment.

• Validation and compliance - The steps to validate a build are determined by the needs of your organization. Image security scanning, security scanning, and code analysis of applications ensure the quality of images and application code.

          Figure: CI/CD Pipeline Stages

          "},{"location":"other-tools/CI-CD/github-actions/setup-github-actions-pipeline/","title":"How to setup GitHub Actions Pipeline","text":"

          GitHub Actions gives you the ability to create workflows to automate the deployment process to OpenShift. GitHub Actions makes it easy to automate all your CI/CD workflows.

          "},{"location":"other-tools/CI-CD/github-actions/setup-github-actions-pipeline/#terminiology","title":"Terminiology","text":""},{"location":"other-tools/CI-CD/github-actions/setup-github-actions-pipeline/#workflow","title":"Workflow","text":"

          Automation-as-code that you can set up in your repository.

          "},{"location":"other-tools/CI-CD/github-actions/setup-github-actions-pipeline/#events","title":"Events","text":"

          30+ workflow triggers, including on schedule and from external systems.

          "},{"location":"other-tools/CI-CD/github-actions/setup-github-actions-pipeline/#actions","title":"Actions","text":"

          Community-powered units of work that you can use as steps to create a job in a workflow.

          "},{"location":"other-tools/CI-CD/github-actions/setup-github-actions-pipeline/#deploy-an-application-to-your-nerc-openshift-project","title":"Deploy an Application to your NERC OpenShift Project","text":"
          • Prerequisites

            You must have at least one active NERC-OCP (OpenShift) type resource allocation. You can refer to this documentation on how to get allocation and request \"NERC-OCP (OpenShift)\" type resource allocations.

          "},{"location":"other-tools/CI-CD/github-actions/setup-github-actions-pipeline/#steps","title":"Steps","text":"
          1. Access to the NERC's OpenShift Container Platform at https://console.apps.shift.nerc.mghpcc.org as described here. To get access to NERC's OCP web console you need to be part of ColdFront's active allocation.

          2. Setup the OpenShift CLI Tools locally and configure the OpenShift CLI to enable oc commands. Refer to this user guide.

          3. Setup Github CLI on your local machine as described here and verify you are able to run gh commands as shown below:

          4. Fork the simple-node-app App in your own Github:

This application runs a simple node.js server and serves up some static routes with some static responses. This demo shows how a simple container-based app can easily be bootstrapped onto your NERC OpenShift project space.

            Very Important Information

            As you won't have full access to this repository, we recommend first forking the repository on your own GitHub account. So, you'll need to update all references to https://github.com/nerc-project/simple-node-app.git to point to your own forked repository.

            To create a fork of the example simple-node-app repository:

            1. Go to https://github.com/nerc-project/simple-node-app.

2. Click the \"Fork\" button to create a fork in your own GitHub account, e.g. \"https://github.com/<github_username>/simple-node-app\".

          5. Clone the simple-node-app git repository:

git clone https://github.com/<github_username>/simple-node-app.git\ncd simple-node-app\n

6. Run either the setsecret.cmd file if you are using Windows or the setsecret.sh file if you are using a Linux-based machine. Once executed, verify the GitHub Secrets are set properly under your GitHub repo's Settings >> Secrets and variables >> Actions as shown here:

          7. Enable and Update GitHub Actions Pipeline on your own forked repo:

            • Enable the OpenShift Workflow in the Actions tab of in your GitHub repository.

            • Update the provided sample OpenShift workflow YAML file i.e. openshift.yml, which is located at \"https://github.com/<github_username>/simple-node-app/actions/workflows/openshift.yml\".

              Very Important Information

              Workflow execution on OpenShift pipelines follows these steps:

              1. Checkout your repository
              2. Perform a container image build
              3. Push the built image to the GitHub Container Registry (GHCR) or your preferred Registry
              4. Log in to your NERC OpenShift cluster's project space
              5. Create an OpenShift app from the image and expose it to the internet
          8. Edit the top-level 'env' section as marked with '\ud83d\udd8a\ufe0f' if the defaults are not suitable for your project.

          9. (Optional) Edit the build-image step to build your project:

            The default build type uses a Dockerfile at the root of the repository, but can be replaced with a different file, a source-to-image build, or a step-by-step buildah build.

          10. Commit and push the workflow file to your default branch to trigger a workflow run as shown below:

          11. Verify that you can see the newly deployed application on the NERC's OpenShift Container Platform at https://console.apps.shift.nerc.mghpcc.org as described here, and ensure that it can be browsed properly.

          12. That's it! Every time you commit changes to your GitHub repo, GitHub Actions will trigger your configured Pipeline, which will ultimately deploy your application to your own NERC OpenShift Project.

            "},{"location":"other-tools/CI-CD/jenkins/integrate-your-GitHub-repository/","title":"How to Integrate Your GitHub Repository to Your Jenkins Project","text":"

This explains how to add a GitHub Webhook to your Jenkins Pipeline, which saves you time and keeps your project updated all the time.

            Prerequisite

            You need to have setup CI/CD Pipelines on NERC's OpenStack by following this document.

            "},{"location":"other-tools/CI-CD/jenkins/integrate-your-GitHub-repository/#what-is-a-webhook","title":"What is a webhook?","text":"

A webhook is an HTTP callback: an HTTP POST that occurs when something happens, i.e. a simple event notification delivered via HTTP POST. GitHub provides its own webhook options for such tasks.

            "},{"location":"other-tools/CI-CD/jenkins/integrate-your-GitHub-repository/#configuring-github","title":"Configuring GitHub","text":"

            Let's see how to configure and add a webhook in GitHub:

            1. Go to your forked GitHub project repository.

2. Click on \"Settings\" in the right corner as shown below:

3. Click on \"Webhooks\" and then click \"Add webhook\".

4. In the \"Payload URL\" field, paste your Jenkins environment URL and add /github-webhook/ at the end, i.e. http://<Floating-IP>:8080/github-webhook/, for example http://199.94.60.4:8080/github-webhook/. Select \"Content type\" as \"application/json\" and leave the \"Secret\" field empty.

            5. In the page \"Which events would you like to trigger this webhook?\" select the option \"Let me select individual events\". Then, check \"Pull Requests\" and \"Pushes\". At the end of this option, make sure that the \"Active\" option is checked and then click on \"Add webhook\" button.

We're done with the configuration on GitHub's side! Now let's configure the Jenkins side to use this webhook.

That's it! In this way we can add a webhook to our job and ensure that every time you commit changes to your GitHub repo, GitHub will trigger your new Jenkins job, as we already set up the \"GitHub hook trigger for GITScm polling\" option for our Jenkins pipeline previously.

            "},{"location":"other-tools/CI-CD/jenkins/setup-jenkins-CI-CD-pipeline/","title":"How to Set Up Jenkins Pipeline on a VM","text":"

            This document will walk you through how to setup a minimal \"CI/CD Pipeline To Deploy To Kubernetes Cluster Using a CI/CD tool called Jenkins\" on your NERC's OpenStack environment. Jenkins uses the Kubernetes control plane on K8s Cluster to run pipeline tasks that enable DevOps to spend more time coding and testing and less time troubleshooting.

            Prerequisite

            You need Kubernetes cluster running in your OpenStack environment. To setup your K8s cluster please Read this.

            Figure: CI/CD Pipeline To Deploy To Kubernetes Cluster Using Jenkins on NERC

            "},{"location":"other-tools/CI-CD/jenkins/setup-jenkins-CI-CD-pipeline/#setup-a-jenkins-server-vm","title":"Setup a Jenkins Server VM","text":"
            • Launch 1 Linux machine based on ubuntu-20.04-x86_64 and cpu-su.2 flavor with 2vCPU, 8GB RAM, and 20GB storage.

            • Make sure you have added rules in the Security Groups to allow ssh using Port 22 access to the instance.

            • Setup a new Security Group with the following rules exposing port 8080 and attach it to your new instance.

            • Assign a Floating IP to your new instance so that you will be able to ssh into this machine:

              ssh ubuntu@<Floating-IP> -A -i <Path_To_Your_Private_Key>\n

              For example:

              ssh ubuntu@199.94.60.4 -A -i cloud.key\n

Upon successfully accessing the machine via SSH, install the following dependencies:

            Very Important

            Run the following steps as non-root user i.e. ubuntu.

            • Update the repositories and packages:

              sudo apt-get update && sudo apt-get upgrade -y\n
            • Turn off swap

              swapoff -a\nsudo sed -i '/ swap / s/^/#/' /etc/fstab\n
            • Install curl and apt-transport-https

              sudo apt-get update && sudo apt-get install -y apt-transport-https curl\n
            "},{"location":"other-tools/CI-CD/jenkins/setup-jenkins-CI-CD-pipeline/#download-and-install-the-latest-version-of-docker-ce","title":"Download and install the latest version of Docker CE","text":"
            • Download and install Docker CE:

              curl -fsSL https://get.docker.com -o get-docker.sh\nsudo sh get-docker.sh\n
            • Configure the Docker daemon:

              sudo usermod -aG docker $USER && newgrp docker\n
            "},{"location":"other-tools/CI-CD/jenkins/setup-jenkins-CI-CD-pipeline/#install-kubectl","title":"Install kubectl","text":"

            kubectl: the command line util to talk to your cluster.

            • Download the Google Cloud public signing key and add key to verify releases

              curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo \\\n  apt-key add -\n
            • add kubernetes apt repo

              cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list\ndeb https://apt.kubernetes.io/ kubernetes-xenial main\nEOF\n
            • Install kubectl

              sudo apt-get update\nsudo apt-get install -y kubectl\n
            • apt-mark hold is used so that these packages will not be updated/removed automatically

              sudo apt-mark hold kubectl\n
            "},{"location":"other-tools/CI-CD/jenkins/setup-jenkins-CI-CD-pipeline/#install-a-jenkins-server-using-docker","title":"Install a Jenkins Server using Docker","text":"

            To install a Jenkins server using Docker run the following command:

            docker run -u 0 --privileged --name jenkins -it -d -p 8080:8080 -p 50000:50000 \\\n    -v /var/run/docker.sock:/var/run/docker.sock \\\n    -v $(which docker):/usr/bin/docker \\\n    -v $(which kubectl):/usr/bin/kubectl \\\n    -v /home/jenkins_home:/var/jenkins_home \\\n    jenkins/jenkins:latest\n

Once the docker run command completes successfully, browse to http://<Floating-IP>:8080. This will show you where to get the initial Administrator password to get started, i.e. /var/jenkins_home/secrets/initialAdminPassword, as shown below:

The /var/jenkins_home directory in the Jenkins docker container is a volume mounted from the host's /home/jenkins_home, so you can simply browse to /home/jenkins_home/secrets/initialAdminPassword on the host machine you ssh'ed into to read the same content as /var/jenkins_home/secrets/initialAdminPassword.

            Initial Admin Password

            If you can't find the Admin password at /var/jenkins_home/secrets/initialAdminPassword, then try to locate it at its original location, i.e. /home/jenkins_home/secrets/initialAdminPassword.

Or, you can run docker ps on the VM where you run the Jenkins server, note the Name of the docker container, and then run docker logs -f <jenkins_docker_container_name>. This will show the initial Administrator password on the terminal, which you can copy and paste into the web GUI in the browser.

            Initial Admin Password

            When you run docker logs -f <jenkins_docker_container_name>, the initial password for the \"Admin\" user can be found between the rows of asterisks as shown below:

            • Once you login to the Jenkins Web UI by entering the admin password shown on CLI terminal, click on the \"Install suggested plugins\" button as shown below:

              Continue by selecting 'Skip and continue as admin' first as shown below:

              Then click the 'Save and Finish' button as shown below and then, Jenkins is ready to use.

            "},{"location":"other-tools/CI-CD/jenkins/setup-jenkins-CI-CD-pipeline/#install-the-required-plugins","title":"Install the required Plugins","text":"
            • Jenkins has a wide range of plugin options. From your Jenkins dashboard navigate to \"Manage Jenkins > Manage Plugins\" as shown below:

Select the \"Available\" tab, locate Docker Pipeline by searching, and then click the \"Install without restart\" button as shown below:

              Also, install the Kubernetes CLI plugin that allows you to configure kubectl commands on Jenkinsfile to interact with Kubernetes clusters as shown below:

            "},{"location":"other-tools/CI-CD/jenkins/setup-jenkins-CI-CD-pipeline/#create-the-required-credentials","title":"Create the required Credentials","text":"
            • Create a global credential for your Docker Hub Registry by providing the username and password that will be used by the Jenkins pipelines:

              1. Click on the \"Manage Jenkins\" menu and then click on the \"Manage Credentials\" link as shown below:

              2. Click on Jenkins Store as shown below:

              3. The credentials can be added by clicking the 'Add Credentials' button in the left pane.

            • First, add the 'DockerHub' credentials as 'Username with password' with the ID dockerhublogin.

              a. Select the Kind \"Username with password\" from the dropdown options.

              b. Provide your Docker Hub Registry's username and password.

c. Give it an ID and a short description. The ID is very important, as it will need to be specified in your Jenkinsfile, i.e. dockerhublogin.

• Configure the 'Kubeconfig' credential as a 'Secret file' that holds the kubeconfig file from the K8s master, i.e. located at /etc/kubernetes/admin.conf, with the ID 'kubernetes'.

              a. Click on the \"Add Credentials\" button in the left pane.

              b. Select the Kind \"Secret file\" from the dropdown options.

c. In the File section, choose the config file that contains the EXACT content of your K8s master's kubeconfig file located at /etc/kubernetes/admin.conf.

d. Give an ID and description that you will need to use in your Jenkinsfile, i.e. kubernetes.

e. Once both credentials are successfully added, they are shown as follows:

            "},{"location":"other-tools/CI-CD/jenkins/setup-jenkins-CI-CD-pipeline/#fork-the-nodeapp-app-in-your-own-github","title":"Fork the nodeapp App in your own Github","text":"

            Very Important Information

            As you won't have full access to this repository, we recommend first forking the repository on your own GitHub account. So, you'll need to update all references to https://github.com/nerc-project/nodeapp.git to point to your own forked repository.

            To create a fork of the example nodeapp repository:

            1. Go to https://github.com/nerc-project/nodeapp.

2. Click the \"Fork\" button to create a fork in your own GitHub account, e.g. \"https://github.com/<github_username>/nodeapp\".

            3. Review the \"Jenkinsfile\" that is included at the root of the forked git repo.

              Very Important Information

              A sample Jenkinsfile is available at the root of our demo application's Git repository, which we can reference in our Jenkins pipeline steps. For example, in this case, we are using this repository where our demo Node.js application resides.

            "},{"location":"other-tools/CI-CD/jenkins/setup-jenkins-CI-CD-pipeline/#modify-the-jenkins-declarative-pipeline-script-file","title":"Modify the Jenkins Declarative Pipeline Script file","text":"
• Modify the provided 'Jenkinsfile' to specify your own Docker Hub account and GitHub repository, as indicated by \"<dockerhub_username>\" and \"<github_username>\".

              Very Important Information

              You need to replace \"<dockerhub_username>\" and \"<github_username>\" with your actual DockerHub and GitHub usernames, respectively. Also, ensure that the global credentials IDs mentioned above match those used during the credential saving steps mentioned earlier. For instance, dockerhublogin corresponds to the DockerHub ID saved during the credential saving process for your Docker Hub Registry's username and password. Similarly, kubernetes corresponds to the 'Kubeconfig' ID assigned for the Kubeconfig credential file.

            • Below is an example of a Jenkins declarative Pipeline Script file:

              pipeline {

              environment {\n  dockerimagename = \"<dockerhub_username>/nodeapp:${env.BUILD_NUMBER}\"\n  dockerImage = \"\"\n}\n\nagent any\n\nstages {\n\n  stage('Checkout Source') {\n    steps {\n      git branch: 'main', url: 'https://github.com/<github_username>/nodeapp.git'\n    }\n  }\n\n  stage('Build image') {\n    steps{\n      script {\n        dockerImage = docker.build dockerimagename\n      }\n    }\n  }\n\n  stage('Pushing Image') {\n    environment {\n      registryCredential = 'dockerhublogin'\n    }\n    steps{\n      script {\n        docker.withRegistry('https://registry.hub.docker.com', registryCredential){\n          dockerImage.push()\n        }\n      }\n    }\n  }\n\n  stage('Docker Remove Image') {\n    steps {\n      sh \"docker rmi -f ${dockerimagename}\"\n      sh \"docker rmi -f registry.hub.docker.com/${dockerimagename}\"\n    }\n  }\n\n  stage('Deploying App to Kubernetes') {\n    steps {\n      sh \"sed -i 's/nodeapp:latest/nodeapp:${env.BUILD_NUMBER}/g' deploymentservice.yml\"\n      withKubeConfig([credentialsId: 'kubernetes']) {\n        sh 'kubectl apply -f deploymentservice.yml'\n      }\n    }\n  }\n}\n

              }

              Other way to Generate Pipeline Jenkinsfile

You can generate your custom Jenkinsfile by clicking on the \"Pipeline Syntax\" link shown when you create a new Pipeline via the \"New Item\" menu link.

            "},{"location":"other-tools/CI-CD/jenkins/setup-jenkins-CI-CD-pipeline/#setup-a-pipeline","title":"Setup a Pipeline","text":"
            • Once you review the provided Jenkinsfile and understand the stages, you can now create a pipeline to trigger it on your newly setup Jenkins server:

              a. Click on the \"New Item\" link.

              b. Select the \"Pipeline\" link.

              c. Give name to your Pipeline i.e. \u201cjenkins-k8s-pipeline\u201d

d. Select the \"Build Triggers\" tab and then select \"GitHub hook trigger for GITScm polling\" as shown below:

e. Select the \"Pipeline\" tab and then select \"Pipeline script from SCM\" from the dropdown options. Then you need to specify Git as the SCM, provide the \"Repository URL\" for your public git repo, and specify your branch and Jenkinsfile name as shown below:

Or, you can copy/paste the contents of your Jenkinsfile into the given textbox. Please make sure you select \"Pipeline script\" from the dropdown options.

              f. Click on \"Save\" button.

            "},{"location":"other-tools/CI-CD/jenkins/setup-jenkins-CI-CD-pipeline/#how-to-manually-trigger-the-pipeline","title":"How to manually Trigger the Pipeline","text":"
• Finally, click on the \"Build Now\" menu link in the right-side navigation, which triggers the Pipeline process, i.e. build the docker image, push the image to your Docker Hub Registry, pull the image from the Docker Registry, remove the local Docker images, and then deploy to the K8s Cluster as shown below:

Once you see that the deployment to your K8s Cluster is successful, you can browse the output using http://<Floating-IP>:<NodePort> (see the note after these steps on how to look up the NodePort) as shown below:

You can see the Console Output logs of this pipeline process by clicking the icon before the ID of the started Pipeline in the bottom right corner.

The pipeline stages after successful completion look like below:

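As noted above, if you are unsure which NodePort was assigned to the deployed service, a quick check from the K8s master (a sketch, assuming kubectl is configured against your cluster) is:

kubectl get svc -o wide\n
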
The next documentation continues with how to set up a GitHub Webhook in your Jenkins Pipeline so that Jenkins will trigger the build when a developer commits code to a specific branch of your GitHub repository.

            "},{"location":"other-tools/apache-spark/spark/","title":"Apache Spark Cluster Setup on NERC OpenStack","text":""},{"location":"other-tools/apache-spark/spark/#apache-spark-overview","title":"Apache Spark Overview","text":"

Apache Spark is increasingly recognized as the primary analysis suite for big data, particularly among Python users. Spark offers a robust Python API and includes several valuable built-in libraries such as MLlib for machine learning and Spark Streaming for real-time analysis. In contrast to Apache Hadoop, Spark performs most computations in main memory, boosting performance.

Many modern computational tasks utilize the MapReduce parallel paradigm. This computational process comprises two stages: Map and Reduce. Before task execution, all data is distributed across the nodes of the cluster. During the \"Map\" stage, the master node dispatches the executable task to the other nodes, and each worker processes its respective data. The subsequent step is \"Reduce\", which involves the master node collecting all results from the workers and generating final results based on the workers' outcomes. Apache Spark also implements this model of computation, so it provides big data processing abilities.

            "},{"location":"other-tools/apache-spark/spark/#apache-spark-cluster-setup","title":"Apache Spark Cluster Setup","text":"

            To get a Spark standalone cluster up and running manually, all you need to do is spawn some VMs and start Spark as master on one of them and worker on the others. They will automatically form a cluster that you can connect to/from Python, Java, and Scala applications using the IP address of the master VM.

            "},{"location":"other-tools/apache-spark/spark/#setup-a-master-vm","title":"Setup a Master VM","text":"
            • To create a master VM for the first time, ensure that the \"Image\" dropdown option is selected. In this example, we selected ubuntu-22.04-x86_64 and the cpu-su.2 flavor is being used.

            • Make sure you have added rules in the Security Groups to allow ssh using Port 22 access to the instance.

            • Assign a Floating IP to your new instance so that you will be able to ssh into this machine:

              ssh ubuntu@<Floating-IP> -A -i <Path_To_Your_Private_Key>\n

              For example:

              ssh ubuntu@199.94.61.4 -A -i cloud.key\n
            • Upon successfully accessing the machine, install the following dependencies:

              sudo apt-get -y update\nsudo apt install default-jre -y\n
            • Download and install Scala:

              wget https://downloads.lightbend.com/scala/2.13.10/scala-2.13.10.deb\nsudo dpkg -i scala-2.13.10.deb\nsudo apt-get install scala\n

              Note

              Installing Scala means installing various command-line tools such as the Scala compiler and build tools.

            • Download and unpack Apache Spark:

              SPARK_VERSION=\"3.4.2\"\nAPACHE_MIRROR=\"dlcdn.apache.org\"\n\nwget https://$APACHE_MIRROR/spark/spark-$SPARK_VERSION/spark-$SPARK_VERSION-bin-hadoop3-scala2.13.tgz\nsudo tar -zxvf spark-$SPARK_VERSION-bin-hadoop3-scala2.13.tgz\nsudo cp -far spark-$SPARK_VERSION-bin-hadoop3-scala2.13 /usr/local/spark\n

              Very Important Note

              Please ensure you are using the latest Spark version by modifying the SPARK_VERSION in the above script. Additionally, verify that the version exists on the APACHE_MIRROR website. Please note the value of SPARK_VERSION as you will need it during Preparing Jobs for Execution and Examination.

            • Create an SSH/RSA Key by running ssh-keygen -t rsa without using any passphrase:

              ssh-keygen -t rsa\n\nGenerating public/private rsa key pair.\nEnter file in which to save the key (/home/ubuntu/.ssh/id_rsa):\nEnter passphrase (empty for no passphrase):\nEnter same passphrase again:\nYour identification has been saved in /home/ubuntu/.ssh/id_rsa\nYour public key has been saved in /home/ubuntu/.ssh/id_rsa.pub\nThe key fingerprint is:\nSHA256:8i/TVSCfrkdV4+Jyqc00RoZZFSHNj8C0QugmBa7RX7U ubuntu@spark-master\nThe key's randomart image is:\n+---[RSA 3072]----+\n|      .. ..o..++o|\n|     o  o.. +o.+.|\n|    . +o  .o=+.oo|\n|     +.oo  +o++..|\n|    o EoS  .+oo  |\n|     . o   .+B   |\n|        .. +O .  |\n|        o.o..o   |\n|         o..     |\n+----[SHA256]-----+\n
            • Copy and append the contents of the SSH public key, i.e. ~/.ssh/id_rsa.pub, to the ~/.ssh/authorized_keys file, for example as shown below.
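
              Since the key pair was just generated on this same master VM, a one-line sketch that appends the public key locally is:

              cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys\n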

            "},{"location":"other-tools/apache-spark/spark/#create-a-volume-snapshot-of-the-master-vm","title":"Create a Volume Snapshot of the master VM","text":"
            • Once you're logged in to NERC's Horizon dashboard, you need to Shut Off the master VM before creating a volume snapshot.

              Click Action -> Shut Off Instance.

              Status will change to Shutoff.

            • Then, create a snapshot of its attached volume by clicking on the \"Create snapshot\" from the Project -> Volumes -> Volumes as described here.

            "},{"location":"other-tools/apache-spark/spark/#create-two-worker-instances-from-the-volume-snapshot","title":"Create Two Worker Instances from the Volume Snapshot","text":"
            • Once a snapshot is created and is in \"Available\" status, you can view and manage it under the Volumes menu in the Horizon dashboard under Volume Snapshots.

              Navigate to Project -> Volumes -> Snapshots.

            • You have the option to directly launch this volume as an instance by clicking on the arrow next to \"Create Volume\" and selecting \"Launch as Instance\".

              NOTE: Specify Count: 2 to launch 2 instances using the volume snapshot as shown below:

              Naming, Security Group and Flavor for Worker Nodes

              You can specify the \"Instance Name\" as \"spark-worker\", and for each instance, it will automatically append incremental values at the end, such as spark-worker-1 and spark-worker-2. Also, make sure you have attached the Security Groups to allow ssh using Port 22 access to the worker instances.

            Additionally, during launch, you will have the option to choose your preferred flavor for the worker nodes, which can differ from the master VM based on your computational requirements.

            • Navigate to Project -> Compute -> Instances.

            • Restart the shut-off master VM by clicking Action -> Start Instance.

            • The final set up for our Spark cluster looks like this, with 1 master node and 2 worker nodes:

            "},{"location":"other-tools/apache-spark/spark/#configure-spark-on-the-master-vm","title":"Configure Spark on the Master VM","text":"
            • SSH into the master VM again.

            • Update the /etc/hosts file to specify all three hostnames with their corresponding internal IP addresses.

              sudo nano /etc/hosts\n

              Ensure all hosts are resolvable by adding them to /etc/hosts. You can modify the following content, specifying each VM's internal IP addresses, and paste the updated content at the end of the /etc/hosts file. Alternatively, you can use sudo tee -a /etc/hosts to append the content directly to the end of the /etc/hosts file, as shown in the example after the note below.

              <Master-Internal-IP> master\n<Worker1-Internal-IP> worker1\n<Worker2-Internal-IP> worker2\n

              Very Important Note

              Make sure to use >> instead of > to avoid overwriting the existing content and append the new content at the end of the file.

              For example, the end of the /etc/hosts file looks like this:

              sudo cat /etc/hosts\n...\n192.168.0.46 master\n192.168.0.26 worker1\n192.168.0.136 worker2\n
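
              One way to append all three entries in a single step (reusing the example internal IPs above; adjust them to your own VMs) is to pipe them through sudo tee -a, which writes to /etc/hosts with root privileges:

              cat <<EOF | sudo tee -a /etc/hosts\n192.168.0.46 master\n192.168.0.26 worker1\n192.168.0.136 worker2\nEOF\n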
            • Verify that you can SSH into both worker nodes by using ssh worker1 and ssh worker2 from the Spark master node's terminal.

            • Copy the sample configuration file for the Spark:

              cd /usr/local/spark/conf/\ncp spark-env.sh.template spark-env.sh\n
            • Update the environment variables file i.e. spark-env.sh to include the following information:

              export SPARK_MASTER_HOST='<Master-Internal-IP>'\nexport JAVA_HOME=<Path_of_JAVA_installation>\n

              Environment Variables

              Executing this command: readlink -f $(which java) will display the path to the current Java setup in your VM. For example: /usr/lib/jvm/java-11-openjdk-amd64/bin/java, you need to remove the last bin/java part, i.e. /usr/lib/jvm/java-11-openjdk-amd64, to set it as the JAVA_HOME environment variable. Learn more about other Spark settings that can be configured through environment variables here.

              For example:

              echo \"export SPARK_MASTER_HOST='192.168.0.46'\" >> spark-env.sh\necho \"export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64\" >> spark-env.sh\n
            • Source the changed environment variables file i.e. spark-env.sh:

              source spark-env.sh\n
            • Create a file named slaves in the Spark configuration directory (i.e., /usr/local/spark/conf/) that specifies all 3 hostnames (nodes) as specified in /etc/hosts:

              sudo cat slaves\nmaster\nworker1\nworker2\n
            "},{"location":"other-tools/apache-spark/spark/#run-the-spark-cluster-from-the-master-vm","title":"Run the Spark cluster from the Master VM","text":"
            • SSH into the master VM again if you are not already logged in.

            • You need to run the Spark cluster from /usr/local/spark:

              cd /usr/local/spark\n\n# Start all hosts (nodes) including master and workers\n./sbin/start-all.sh\n

              How to Stop All Spark Cluster

              To stop all of the Spark cluster nodes, execute ./sbin/stop-all.sh command from /usr/local/spark.

            "},{"location":"other-tools/apache-spark/spark/#connect-to-the-spark-webui","title":"Connect to the Spark WebUI","text":"

            Apache Spark provides a suite of web user interfaces (WebUIs) that you can use to monitor the status and resource consumption of your Spark cluster.

            Different types of Spark Web UI

            Apache Spark provides different web UIs: Master web UI, Worker web UI, and Application web UI.

            • You can connect to the Master web UI using SSH Port Forwarding, aka SSH Tunneling i.e. Local Port Forwarding from your local machine's terminal by running:

              ssh -N -L <Your_Preferred_Port>:localhost:8080 <User>@<Floating-IP> -i <Path_To_Your_Private_Key>\n

              Here, you can choose any port that is available on your machine as <Your_Preferred_Port>, the master VM's assigned Floating IP as <Floating-IP>, and the associated private key attached to the VM as <Path_To_Your_Private_Key>.

              For example:

              ssh -N -L 8080:localhost:8080 ubuntu@199.94.61.4 -i ~/.ssh/cloud.key\n
            • Once the SSH Tunneling is successful, please do not close or stop the terminal where you are running the SSH Tunneling. Instead, log in to the Master web UI using your web browser: http://localhost:<Your_Preferred_Port> i.e. http://localhost:8080.

            The Master web UI offers an overview of the Spark cluster, showcasing the following details:

            • Master URL and REST URL
            • Available CPUs and memory for the Spark cluster
            • Status and allocated resources for each worker
            • Details on active and completed applications, including their status, resources, and duration
            • Details on active and completed drivers, including their status and resources

            The Master web UI appears as shown below when you navigate to http://localhost:<Your_Preferred_Port> i.e. http://localhost:8080 from your web browser:

            The Master web UI also provides an overview of the applications. Through the Master web UI, you can easily identify the allocated vCPU (Core) and memory resources for both the Spark cluster and individual applications.

            "},{"location":"other-tools/apache-spark/spark/#preparing-jobs-for-execution-and-examination","title":"Preparing Jobs for Execution and Examination","text":"
            • To run jobs from /usr/local/spark, execute the following commands:

              cd /usr/local/spark\nSPARK_VERSION=\"3.4.2\"\n

              Very Important Note

              Please ensure you are using the same Spark version that you have downloaded and installed previously as the value of SPARK_VERSION in the above script.

            • Single Node Job:

              Let's quickly start to run a simple job:

              ./bin/spark-submit --driver-memory 2g --class org.apache.spark.examples.SparkPi examples/jars/spark-examples_2.13-$SPARK_VERSION.jar 50\n
            • Cluster Mode Job:

              Let's submit a longer and more complex job with many tasks that will be distributed among the multi-node cluster, and then view the Master web UI:

              ./bin/spark-submit --class org.apache.spark.examples.SparkPi --master spark://master:7077 examples/jars/spark-examples_2.13-$SPARK_VERSION.jar 1000\n

              While the job is running, you will see a similar view on the Master web UI under the \"Running Applications\" section:

              Once the job is completed, it will show up under the \"Completed Applications\" section on the Master web UI as shown below:

            "},{"location":"other-tools/kubernetes/comparisons/","title":"Comparison","text":""},{"location":"other-tools/kubernetes/comparisons/#kubespray-vs-kubeadm","title":"Kubespray vs Kubeadm","text":"

            Kubeadm provides domain knowledge of Kubernetes clusters' life cycle management, including self-hosted layouts, dynamic discovery services, and so on. Had it belonged to the new operators world, it might have been named a \"Kubernetes cluster operator\". Kubespray, however, performs generic configuration management tasks from the \"OS operators\" Ansible world, plus some initial K8s clustering (with networking plugins included) and control plane bootstrapping.

            Kubespray has used kubeadm internally for cluster creation since v2.3, in order to consume life cycle management domain knowledge from it and offload generic OS configuration tasks from it, which hopefully benefits both sides.

            "},{"location":"other-tools/kubernetes/k0s/","title":"k0s","text":""},{"location":"other-tools/kubernetes/k0s/#key-features","title":"Key Features","text":"
            • Available as a single static binary
            • Offers a self-hosted, isolated control plane
            • Supports a variety of storage backends, including etcd, SQLite, MySQL (or any compatible), and PostgreSQL.
            • Offers an Elastic control plane
            • Vanilla upstream Kubernetes
            • Supports custom container runtimes (containerd is the default)
            • Supports custom Container Network Interface (CNI) plugins (calico is the default)
            • Supports x86_64 and arm64
            "},{"location":"other-tools/kubernetes/k0s/#pre-requisite","title":"Pre-requisite","text":"

            We will need 1 VM to create a single node Kubernetes cluster using k0s. We are using the following setup for this purpose:

            • 1 Linux machine, ubuntu-22.04-x86_64 or your choice of Ubuntu OS image, cpu-su.2 flavor with 2vCPU, 8GB RAM, 20GB storage - also assign Floating IP to this VM.

            • Set up a unique hostname on the machine using the following command:

              echo \"<node_internal_IP> <host_name>\" >> /etc/hosts\nhostnamectl set-hostname <host_name>\n

              For example:

              echo \"192.168.0.252 k0s\" >> /etc/hosts\nhostnamectl set-hostname k0s\n
            "},{"location":"other-tools/kubernetes/k0s/#install-k0s-on-ubuntu","title":"Install k0s on Ubuntu","text":"

            Run the below commands on the Ubuntu VM:

            • SSH into k0s machine

            • Switch to root user: sudo su

            • Update the repositories and packages:

              apt-get update && apt-get upgrade -y\n
            • Download k0s:

              curl -sSLf https://get.k0s.sh | sudo sh\n
            • Install k0s as a service:

              k0s install controller --single\n\nINFO[2021-10-12 01:45:52] no config file given, using defaults\nINFO[2021-10-12 01:45:52] creating user: etcd\nINFO[2021-10-12 01:46:00] creating user: kube-apiserver\nINFO[2021-10-12 01:46:00] creating user: konnectivity-server\nINFO[2021-10-12 01:46:00] creating user: kube-scheduler\nINFO[2021-10-12 01:46:01] Installing k0s service\n
            • Start k0s as a service:

              k0s start\n
            • Check service, logs and k0s status:

              k0s status\n\nVersion: v1.22.2+k0s.1\nProcess ID: 16625\nRole: controller\nWorkloads: true\n
            • Access your cluster using kubectl:

              k0s kubectl get nodes\n\nNAME   STATUS   ROLES    AGE    VERSION\nk0s    Ready    <none>   8m3s   v1.22.2+k0s\n
              alias kubectl='k0s kubectl'\nkubectl get nodes -o wide\n
              kubectl get all\nNAME                 TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE\nservice/kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   38s\n
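
              As an optional sanity check (a minimal sketch mirroring the nginx examples used elsewhere in this guide; my-nginx is just an illustrative name), deploy and expose a test workload:

              kubectl create deployment my-nginx --image nginx\nkubectl expose deployment my-nginx --type=NodePort --port=80\nkubectl get svc my-nginx\n

              The NodePort reported by the last command, combined with the VM's Floating IP, should serve the nginx welcome page, provided that port is open in your Security Groups.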
            "},{"location":"other-tools/kubernetes/k0s/#uninstall-k0s","title":"Uninstall k0s","text":"
            • Stop the service:

              sudo k0s stop\n
            • Execute the k0s reset command, which cleans up the installed system service, data directories, containers, mounts, and network namespaces.

              sudo k0s reset\n
            • Reboot the system

            "},{"location":"other-tools/kubernetes/kind/","title":"Kind","text":""},{"location":"other-tools/kubernetes/kind/#pre-requisite","title":"Pre-requisite","text":"

            We will need 1 VM to create a single node Kubernetes cluster using kind. We are using the following setup for this purpose:

            • 1 Linux machine, almalinux-9-x86_64, cpu-su.2 flavor with 2vCPU, 8GB RAM, 20GB storage - also assign Floating IP to this VM.

            • Set up a unique hostname on the machine using the following command:

              echo \"<node_internal_IP> <host_name>\" >> /etc/hosts\nhostnamectl set-hostname <host_name>\n

              For example:

              echo \"192.168.0.167 kind\" >> /etc/hosts\nhostnamectl set-hostname kind\n
            "},{"location":"other-tools/kubernetes/kind/#install-docker-on-almalinux","title":"Install docker on AlmaLinux","text":"

            Run the below commands on the AlmaLinux VM:

            • SSH into kind machine

            • Switch to root user: sudo su

            • Execute the below commands to install Docker:

              First, remove the container-tools module, which includes stable versions of podman, buildah, skopeo, runc, conmon, etc.; these packages and their dependencies will be removed along with the module. If this module is not removed, it will conflict with Docker. Note that Red Hat recommends Podman on RHEL 8.

              dnf module remove container-tools\n\ndnf update -y\n\ndnf config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo\n\ndnf install docker-ce docker-ce-cli containerd.io docker-compose-plugin\n\nsystemctl start docker\nsystemctl enable --now docker\nsystemctl status docker\n\ndocker -v\n
            "},{"location":"other-tools/kubernetes/kind/#install-kubectl-on-almalinux","title":"Install kubectl on AlmaLinux","text":"
            curl -LO \"https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl\"\nsudo install -o root -g root -m 0755 kubectl /usr/bin/kubectl\nchmod +x /usr/bin/kubectl\n
            • Test to ensure that the kubectl is installed:

              kubectl version --client\n
            "},{"location":"other-tools/kubernetes/kind/#install-kind","title":"Install kind","text":"
            curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.11.1/kind-linux-amd64\nchmod +x ./kind\nmv ./kind /usr/bin\n
            which kind\n\n/bin/kind\n
            kind version\n\nkind v0.11.1 go1.16.4 linux/amd64\n
            • Create the cluster; kind sets the cluster name as a context in kubectl so you can communicate with it:

              kind create cluster --name k8s-kind-cluster1\n\nCreating cluster \"k8s-kind-cluster1\" ...\n\u2713 Ensuring node image (kindest/node:v1.21.1) \ud83d\uddbc\n\u2713 Preparing nodes \ud83d\udce6\n\u2713 Writing configuration \ud83d\udcdc\n\u2713 Starting control-plane \ud83d\udd79\ufe0f\n\u2713 Installing CNI \ud83d\udd0c\n\u2713 Installing StorageClass \ud83d\udcbe\nSet kubectl context to \"kind-k8s-kind-cluster1\"\nYou can now use your cluster with:\n\nkubectl cluster-info --context kind-k8s-kind-cluster1\n\nHave a nice day! \ud83d\udc4b\n
            • Get the cluster details:

              kubectl cluster-info --context kind-k8s-kind-cluster1\n\nKubernetes control plane is running at https://127.0.0.1:38646\nCoreDNS is running at https://127.0.0.1:38646/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n
              kubectl get all\n\nNAME                TYPE       CLUSTER-IP  EXTERNAL-IP  PORT(S)  AGE\nservice/kubernetes  ClusterIP  10.96.0.1   <none>       443/TCP  5m25s\n
              kubectl get nodes\n\nNAME                             STATUS  ROLES                AGE    VERSION\nk8s-kind-cluster1-control-plane  Ready  control-plane,master  5m26s  v1.21.1\n
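
              As an optional sanity check (a minimal sketch; my-nginx is just an illustrative name), you can deploy a test workload and reach it via kubectl port-forward, since NodePorts of a kind cluster may not be directly reachable from outside the VM without extra configuration:

              kubectl create deployment my-nginx --image nginx\nkubectl expose deployment my-nginx --port=80\nkubectl port-forward --address 0.0.0.0 svc/my-nginx 8080:80\n

              With --address 0.0.0.0, port 8080 is bound on all interfaces of the VM, so http://<Floating-IP>:8080 should show the nginx welcome page as long as port 8080 is allowed in your Security Groups.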
            "},{"location":"other-tools/kubernetes/kind/#deleting-a-cluster","title":"Deleting a Cluster","text":"

            If you created a cluster with kind create cluster then deleting is equally simple:

            kind delete cluster\n
            "},{"location":"other-tools/kubernetes/kubernetes/","title":"Kubernetes Overview","text":"

            Kubernetes, commonly known as K8s, is an open-source container orchestration tool for managing containerized cloud-native workloads and services across computing, networking, and storage infrastructure. K8s can help you deploy and manage containerized applications such as platform as a service (PaaS), batch processing workers, and microservices in the cloud at scale. It reduces cloud computing costs while simplifying the operation of resilient and scalable applications. While it is possible to install and manage Kubernetes on infrastructure that you manage yourself, it is a time-consuming and complicated process. To make provisioning and deploying clusters much easier, we have listed a number of popular platforms and tools to set up K8s on your NERC OpenStack Project space.

            "},{"location":"other-tools/kubernetes/kubernetes/#kubernetes-components-architecture","title":"Kubernetes Components & Architecture","text":"

            A Kubernetes cluster consists of a set of worker machines, called nodes, that run containerized applications. Every cluster has at least one worker node. The worker node(s) host the Pods that are the components of the application workload.

            The control plane or master manages the worker nodes and the Pods in the cluster. In production environments, the control plane usually runs across multiple computers and a cluster usually runs multiple nodes, providing fault-tolerance, redundancy, and high availability.

            Here's the diagram of a Kubernetes cluster with all the components tied together.

            "},{"location":"other-tools/kubernetes/kubernetes/#kubernetes-basics-workflow","title":"Kubernetes Basics workflow","text":"
            1. Create a Kubernetes cluster

            2. Deploy an app

            3. Explore your app

            4. Expose your app publicly

            5. Scale up your app

            6. Update your app
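
            Steps 2 through 6 of this workflow map onto a handful of kubectl commands. The following is a minimal sketch only; the deployment name and image tags are illustrative:

            kubectl create deployment hello-app --image=k8s.gcr.io/echoserver:1.4          # 2. deploy an app\nkubectl get pods                                                               # 3. explore your app\nkubectl expose deployment hello-app --type=NodePort --port=8080               # 4. expose your app publicly\nkubectl scale deployment hello-app --replicas=3                               # 5. scale up your app\nkubectl set image deployment/hello-app echoserver=k8s.gcr.io/echoserver:1.10  # 6. update your app\n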

            "},{"location":"other-tools/kubernetes/kubernetes/#development-environment","title":"Development environment","text":"
            1. Minikube is a local Kubernetes cluster that focuses on making Kubernetes development and learning simple. Kubernetes may be started with just a single command if you have a Docker (or similarly compatible) container or a Virtual Machine environment. For more read this.

            2. Kind is a tool for running local Kubernetes clusters utilizing Docker container \"nodes\". It was built for Kubernetes testing, but it may also be used for local development and continuous integration. For more read this.

            3. MicroK8s is the smallest, fastest, and most conformant Kubernetes that tracks upstream releases and simplifies clustering. MicroK8s is ideal for prototyping, testing, and offline development. For more read this.

            4. K3s is a certified Kubernetes distribution developed by Rancher Labs and now a CNCF sandbox project that fully implements the Kubernetes API in a single binary of less than 40MB. To achieve this, they got rid of a lot of additional drivers that didn't need to be in the core and could easily be replaced with add-ons. For more read this.

              To setup a Multi-master HA K3s cluster using k3sup(pronounced ketchup) read this.

              To setup a Single-Node K3s Cluster using k3d read this and if you would like to setup Multi-master K3s cluster setup using k3d read this.

            5. k0s is an all-inclusive Kubernetes distribution, configured with all of the features needed to build a Kubernetes cluster simply by copying and running an executable file on each target host. For more read this.

            "},{"location":"other-tools/kubernetes/kubernetes/#production-environment","title":"Production environment","text":"

            If your Kubernetes cluster has to run critical workloads, it must be configured as a resilient, highly available (HA), production-ready Kubernetes cluster. To set up a production-quality cluster, you can use the following deployment tools.

            1. Kubeadm performs the actions necessary to get a minimum viable, secure cluster up and running in a user-friendly way. To bootstrap a cluster with kubeadm, read this, and if you would like to set up a multi-master cluster using Kubeadm, read this.

            2. Kubespray helps to install a Kubernetes cluster on NERC OpenStack. Kubespray is a composition of Ansible playbooks, inventory, provisioning tools, and domain knowledge for generic OS/Kubernetes cluster configuration management tasks. To install Kubernetes with Kubespray, read this.

            To choose a tool which best fits your use case, read this comparison.

            "},{"location":"other-tools/kubernetes/kubespray/","title":"Kubespray","text":""},{"location":"other-tools/kubernetes/kubespray/#pre-requisite","title":"Pre-requisite","text":"

            We will need 1 control-plane (master) and 1 worker node to create a single control-plane Kubernetes cluster using Kubespray. We are using the following setup for this purpose:

            • 1 Linux machine for Ansible master, ubuntu-22.04-x86_64 or your choice of Ubuntu OS image, cpu-su.2 flavor with 2vCPU, 8GB RAM, 20GB storage.

            • 1 Linux machine for master, ubuntu-22.04-x86_64 or your choice of Ubuntu OS image, cpu-su.2 flavor with 2vCPU, 8GB RAM, 20GB storage - also assign Floating IP to the master node.

            • 1 Linux machines for worker, ubuntu-22.04-x86_64 or your choice of Ubuntu OS image, cpu-su.1 flavor with 1vCPU, 4GB RAM, 20GB storage.

            • ssh access to all machines: Read more here on how to set up SSH on your remote VMs.

            • To allow SSH from the Ansible master to all other nodes (Read more here), generate an SSH key for the Ansible master node using:

              ssh-keygen -t rsa\n\nGenerating public/private rsa key pair.\nEnter file in which to save the key (/root/.ssh/id_rsa):\nEnter passphrase (empty for no passphrase):\nEnter same passphrase again:\nYour identification has been saved in /root/.ssh/id_rsa\nYour public key has been saved in /root/.ssh/id_rsa.pub\nThe key fingerprint is:\nSHA256:OMsKP7EmhT400AJA/KN1smKt6eTaa3QFQUiepmj8dxroot@ansible-master\nThe key's randomart image is:\n+---[RSA 3072]----+\n|=o.oo.           |\n|.o...            |\n|..=  .           |\n|=o.= ...         |\n|o=+.=.o SE       |\n|.+*o+. o. .      |\n|.=== +o. .       |\n|o+=o=..          |\n|++o=o.           |\n+----[SHA256]-----+\n

              Copy and append the contents of the SSH public key, i.e. ~/.ssh/id_rsa.pub, to the other nodes' ~/.ssh/authorized_keys file. Please make sure you are logged in as the root user by doing sudo su before you copy this public key to the end of the ~/.ssh/authorized_keys file of the other master and worker nodes. This will allow ssh <other_nodes_internal_ip> from the Ansible master node's terminal, for example as sketched below.
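
              One sketch of this manual copy (the placeholder is not a literal value): print the key on the Ansible master and append the copied line on each of the other nodes as root:

              # On ansible_master (as root): print the public key\ncat ~/.ssh/id_rsa.pub\n\n# On each of the other nodes (as root): append the copied line\necho \"<contents_of_id_rsa.pub>\" >> /root/.ssh/authorized_keys\n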

            • Create 2 security groups with appropriate ports and protocols:

              i. To be used by the master nodes:

              ii. To be used by the worker nodes:

            • Set up a unique hostname on each machine using the following command:

              echo \"<node_internal_IP> <host_name>\" >> /etc/hosts\nhostnamectl set-hostname <host_name>\n

              For example:

              echo \"192.168.0.224 ansible_master\" >> /etc/hosts\nhostnamectl set-hostname ansible_master\n

            In this step, you will update packages and disable swap on all 3 nodes:

            • 1 Ansible Master Node - ansible_master

            • 1 Kubernetes Master Node - kubspray_master

            • 1 Kubernetes Worker Node - kubspray_worker1

            The below steps will be performed on all the above mentioned nodes:

            • SSH into all the 3 machines

            • Switch as root: sudo su

            • Update the repositories and packages:

              apt-get update && apt-get upgrade -y\n
            • Turn off swap

              swapoff -a\nsed -i '/ swap / s/^/#/' /etc/fstab\n
            "},{"location":"other-tools/kubernetes/kubespray/#configure-kubespray-on-ansible_master-node-using-ansible-playbook","title":"Configure Kubespray on ansible_master node using Ansible Playbook","text":"

            Run the below commands on the ansible_master node, which will be used to configure and deploy the cluster.

            • SSH into ansible_master machine

            • Switch to root user: sudo su

            • Execute the below commands to set up Kubespray and initialize the cluster:

            • Install Python3 and upgrade pip to pip3:

              apt install python3-pip -y\npip3 install --upgrade pip\npython3 -V && pip3 -V\npip -V\n
            • Clone the Kubespray git repository:

              git clone https://github.com/kubernetes-sigs/kubespray.git\ncd kubespray\n
            • Install dependencies from requirements.txt:

              pip install -r requirements.txt\n
            • Copy inventory/sample as inventory/mycluster

              cp -rfp inventory/sample inventory/mycluster\n
            • Update Ansible inventory file with inventory builder:

              This step is a little tricky because we need to update hosts.yml with the nodes' IPs.

              Now we are going to declare a variable \"IPS\" for storing the IP addresses of the other K8s nodes, i.e. kubspray_master (192.168.0.130) and kubspray_worker1 (192.168.0.32):

              declare -a IPS=(192.168.0.130 192.168.0.32)\nCONFIG_FILE=inventory/mycluster/hosts.yml python3 \\\n    contrib/inventory_builder/inventory.py ${IPS[@]}\n

              This outputs:

              DEBUG: Adding group all\nDEBUG: Adding group kube_control_plane\nDEBUG: Adding group kube_node\nDEBUG: Adding group etcd\nDEBUG: Adding group k8s_cluster\nDEBUG: Adding group calico_rr\nDEBUG: adding host node1 to group all\nDEBUG: adding host node2 to group all\nDEBUG: adding host node1 to group etcd\nDEBUG: adding host node1 to group kube_control_plane\nDEBUG: adding host node2 to group kube_control_plane\nDEBUG: adding host node1 to group kube_node\nDEBUG: adding host node2 to group kube_node\n
            • After running the above commands do verify the hosts.yml and its content:

              cat inventory/mycluster/hosts.yml\n

              The contents of the hosts.yml file should looks like:

              all:\n    hosts:\n        node1:\n            ansible_host: 192.168.0.130\n            ip: 192.168.0.130\n            access_ip: 192.168.0.130\n        node2:\n            ansible_host: 192.168.0.32\n            ip: 192.168.0.32\n            access_ip: 192.168.0.32\n    children:\n        kube_control_plane:\n            hosts:\n                node1:\n                node2:\n        kube_node:\n            hosts:\n                node1:\n                node2:\n        etcd:\n            hosts:\n                node1:\n        k8s_cluster:\n            children:\n                kube_control_plane:\n                kube_node:\n        calico_rr:\n            hosts: {}\n
            • Review and change parameters under inventory/mycluster/group_vars

              cat inventory/mycluster/group_vars/all/all.yml\ncat inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml\n
            • It can be useful to set the following two variables to true in inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml: kubeconfig_localhost (to make a copy of kubeconfig on the host that runs Ansible in { inventory_dir }/artifacts) and kubectl_localhost (to download kubectl onto the host that runs Ansible in { bin_dir }).

              Very Important

              As the Ubuntu 20 KVM kernel doesn't have the dummy module, we need to modify the following two variables in inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml: enable_nodelocaldns: false and kube_proxy_mode: iptables, which will disable the nodelocal dns cache and set the kube-proxy proxyMode to iptables, respectively.
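
              A non-interactive way to apply both changes (a sketch, assuming these keys are present and uncommented in the default k8s-cluster.yml; double-check the file afterwards) is:

              sed -i 's/^enable_nodelocaldns: .*/enable_nodelocaldns: false/' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml\nsed -i 's/^kube_proxy_mode: .*/kube_proxy_mode: iptables/' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml\ngrep -E 'enable_nodelocaldns|kube_proxy_mode' inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml\n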

            • Deploy Kubespray with the Ansible Playbook - run the playbook as the root user. The option --become is required because the playbook, for example, writes SSL keys in /etc/, installs packages, and interacts with various systemd daemons. Without --become the playbook will fail to run!

              ansible-playbook -i inventory/mycluster/hosts.yml --become --become-user=root cluster.yml\n

              Note

              Running the Ansible playbook takes a while; the duration also depends on your network bandwidth.

            "},{"location":"other-tools/kubernetes/kubespray/#install-kubectl-on-kubernetes-master-node-ie-kubspray_master","title":"Install kubectl on Kubernetes master node .i.e. kubspray_master","text":"
            • Install kubectl binary

              snap install kubectl --classic\n

              This outputs: kubectl 1.26.1 from Canonical\u2713 installed

            • Now verify the kubectl version:

              kubectl version -o yaml\n
            "},{"location":"other-tools/kubernetes/kubespray/#validate-all-cluster-components-and-nodes-are-visible-on-all-nodes","title":"Validate all cluster components and nodes are visible on all nodes","text":"
            • Verify the cluster

              kubectl get nodes\n\nNAME    STATUS   ROLES                  AGE     VERSION\nnode1   Ready    control-plane,master   6m7s    v1.26.1\nnode2   Ready    control-plane,master   5m32s   v1.26.1\n
            "},{"location":"other-tools/kubernetes/kubespray/#deploy-a-hello-minikube-application","title":"Deploy A Hello Minikube Application","text":"
            • Use the kubectl create command to create a Deployment that manages a Pod. The Pod runs a Container based on the provided Docker image.

              kubectl create deployment hello-minikube --image=k8s.gcr.io/echoserver:1.4\n
              kubectl expose deployment hello-minikube --type=LoadBalancer --port=8080\n\nservice/hello-minikube exposed\n
            • View the deployments information:

              kubectl get deployments\n\nNAME             READY   UP-TO-DATE   AVAILABLE   AGE\nhello-minikube   1/1     1            1           50s\n
            • View the port information:

              kubectl get svc hello-minikube\n\nNAME             TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE\nhello-minikube   LoadBalancer   10.233.35.126   <pending>     8080:30723/TCP   40s\n
            • Expose the service locally

              kubectl port-forward svc/hello-minikube 30723:8080\n\nForwarding from [::1]:30723 -> 8080\nForwarding from 127.0.0.1:30723 -> 8080\nHandling connection for 30723\nHandling connection for 30723\n

            Go to browser, visit http://<Master-Floating-IP>:8080 i.e. http://140.247.152.235:8080/ to check the hello minikube default page.

            "},{"location":"other-tools/kubernetes/kubespray/#clean-up","title":"Clean up","text":"

            Now you can clean up the app resources you created in your cluster:

            kubectl delete service hello-minikube\nkubectl delete deployment hello-minikube\n
            "},{"location":"other-tools/kubernetes/microk8s/","title":"Microk8s","text":""},{"location":"other-tools/kubernetes/microk8s/#pre-requisite","title":"Pre-requisite","text":"

            We will need 1 VM to create a single node Kubernetes cluster using microk8s. We are using the following setup for this purpose:

            • 1 Linux machine, ubuntu-22.04-x86_64 or your choice of Ubuntu OS image, cpu-su.2 flavor with 2vCPU, 8GB RAM, 20GB storage - also assign Floating IP to this VM.

            • Set up a unique hostname on the machine using the following command:

              echo \"<node_internal_IP> <host_name>\" >> /etc/hosts\nhostnamectl set-hostname <host_name>\n

              For example:

              echo \"192.168.0.62 microk8s\" >> /etc/hosts\nhostnamectl set-hostname microk8s\n
            "},{"location":"other-tools/kubernetes/microk8s/#install-microk8s-on-ubuntu","title":"Install MicroK8s on Ubuntu","text":"

            Run the below commands on the Ubuntu VM:

            • SSH into microk8s machine

            • Switch to root user: sudo su

            • Update the repositories and packages:

              apt-get update && apt-get upgrade -y\n
            • Install MicroK8s:

              sudo snap install microk8s --classic\n
            • Check the status while Kubernetes starts

              microk8s status --wait-ready\n
            • Turn on the services you want:

              microk8s enable dns dashboard\n

              Try microk8s enable --help for a list of available services and optional features. microk8s disable <name> turns off a service. For example, other useful services can be enabled with: microk8s enable registry istio storage

            • Start using Kubernetes

              microk8s kubectl get all --all-namespaces\n

              If you mainly use MicroK8s, you can make its kubectl the default one on your command line with alias mkctl=\"microk8s kubectl\". Since it is a standard upstream kubectl, you can also drive other Kubernetes clusters with it by pointing to the respective kubeconfig file via the --kubeconfig argument.

            • Access the Kubernetes dashboard UI:

              As we see above, the kubernetes-dashboard service in the kube-system namespace has a ClusterIP of 10.152.183.73 and listens on TCP port 443. The ClusterIP is randomly assigned, so if you follow these steps on your host, make sure you check the IP address you got.

              Note

              Another way to access the default token to be used for the dashboard access can be retrieved with:

              token=$(microk8s kubectl -n kube-system get secret | grep default-token | cut -d \" \" -f1)\nmicrok8s kubectl -n kube-system describe secret $token\n
            • Keep running the kubernetes-dashboad on Proxy to access it via web browser:

              microk8s dashboard-proxy\n\nChecking if Dashboard is running.\nDashboard will be available at https://127.0.0.1:10443\nUse the following token to login:\neyJhbGc....\n

              Important

              This tells us the IP address of the Dashboard and the port. The values assigned to your Dashboard will differ. Please note the displayed PORT and the TOKEN that are required to access the kubernetes-dashboard. Make sure, the exposed PORT is opened in Security Groups for the instance following this guide.

              This will show the token to log in to the Dashboard served at the URL and port shown above.

              You'll need to wait a few minutes before the dashboard becomes available. Open a web browser and point it to https://<Floating-IP>:<PORT> (where PORT is the port assigned to the Dashboard, noted while running the above command); you\u2019ll need to accept the risk because the Dashboard uses a self-signed certificate. Then, enter the previously noted TOKEN to access the kubernetes-dashboard.

              Once you enter the correct TOKEN the kubernetes-dashboard is accessed and looks like below:

            Information

            • Start and stop Kubernetes: Kubernetes is a collection of system services that talk to each other all the time. If you don\u2019t need them running in the background then you will save battery by stopping them. microk8s start and microk8s stop will do those tasks for you.

            • To Reset the infrastructure to a clean state: microk8s reset

            "},{"location":"other-tools/kubernetes/microk8s/#deploy-a-container-using-the-kubernetes-dashboard","title":"Deploy a Container using the Kubernetes-Dashboard","text":"

            Click on the + button in the top left corner of the main window. On the resulting page, click Create from form and then fill out the necessary information as shown below:

            You should immediately be directed to a page that lists your new deployment as shown below:

            Go back to the terminal window and issue the command:

            microk8s kubectl get svc tns -n kube-system\n\nNAME   TYPE           CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE\ntns    LoadBalancer   10.152.183.90   <pending>     8080:30012/TCP   14m\n

            Go to browser, visit http://<Floating-IP>:<NodePort> i.e. http://128.31.26.4:30012/ to check the nginx default page.

            "},{"location":"other-tools/kubernetes/microk8s/#deploy-a-sample-nginx-application","title":"Deploy A Sample Nginx Application","text":"
            • Create an alias:

              alias mkctl=\"microk8s kubectl\"\n
            • Create a deployment, in this case Nginx:

              mkctl create deployment --image nginx my-nginx\n
            • To access the deployment we will need to expose it:

              mkctl expose deployment my-nginx --port=80 --type=NodePort\n
              mkctl get svc my-nginx\n\nNAME       TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)        AGE\nmy-nginx   NodePort   10.152.183.41   <none>        80:31225/TCP   35h\n

            Go to browser, visit http://<Floating-IP>:<NodePort> i.e. http://128.31.26.4:31225/ to check the nginx default page.

            "},{"location":"other-tools/kubernetes/microk8s/#deploy-another-application","title":"Deploy Another Application","text":"

            You can start by creating a microbot deployment with two pods via the kubectl cli:

            mkctl create deployment microbot --image=dontrebootme/microbot:v1\nmkctl scale deployment microbot --replicas=2\n

            To expose the deployment to NodePort, you need to create a service:

            mkctl expose deployment microbot --type=NodePort --port=80 --name=microbot-service\n

            View the port information:

            mkctl get svc microbot-service\n\nNAME               TYPE       CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE\nmicrobot-service   NodePort   10.152.183.8   <none>        80:31442/TCP   35h\n

            Go to browser, visit http://<Floating-IP>:<NodePort> i.e. http://128.31.26.4:31442/ to check the microbot default page.

            "},{"location":"other-tools/kubernetes/minikube/","title":"Minikube","text":""},{"location":"other-tools/kubernetes/minikube/#minimum-system-requirements-for-minikube","title":"Minimum system requirements for minikube","text":"
            • 2 GB RAM or more
            • 2 CPU / vCPUs or more
            • 20 GB free hard disk space or more
            • A container or virtual machine manager, such as Docker, Hyperkit, Hyper-V, KVM, Parallels, Podman, VirtualBox, or VMware.
            "},{"location":"other-tools/kubernetes/minikube/#pre-requisite","title":"Pre-requisite","text":"

            We will need 1 VM to create a single node Kubernetes cluster using minikube. We are using the following setup for this purpose:

            • 1 Linux machine for master, ubuntu-22.04-x86_64 or your choice of Ubuntu OS image, cpu-su.2 flavor with 2vCPU, 8GB RAM, 20GB storage - also assign Floating IP to this VM.

            • Set up a unique hostname on the machine using the following command:

              echo \"<node_internal_IP> <host_name>\" >> /etc/hosts\nhostnamectl set-hostname <host_name>\n

              For example:

              echo \"192.168.0.62 minikube\" >> /etc/hosts\nhostnamectl set-hostname minikube\n
            "},{"location":"other-tools/kubernetes/minikube/#install-minikube-on-ubuntu","title":"Install Minikube on Ubuntu","text":"

            Run the below commands on the Ubuntu VM:

            Very Important

            Run the following steps as a non-root user, i.e. ubuntu.

            • SSH into minikube machine

            • Update the repositories and packages:

              sudo apt-get update && sudo apt-get upgrade -y\n
            • Install curl, wget, and apt-transport-https

              sudo apt-get update && sudo apt-get install -y curl wget apt-transport-https\n
            "},{"location":"other-tools/kubernetes/minikube/#download-and-install-the-latest-version-of-docker-ce","title":"Download and install the latest version of Docker CE","text":"
            • Download and install Docker CE:

              curl -fsSL https://get.docker.com -o get-docker.sh\nsudo sh get-docker.sh\n
            • Configure the Docker daemon:

              sudo usermod -aG docker $USER && newgrp docker\n
            "},{"location":"other-tools/kubernetes/minikube/#install-kubectl","title":"Install kubectl","text":"
            • Install kubectl binary

              kubectl: the command line util to talk to your cluster.

              sudo snap install kubectl --classic\n

              This outputs:

              kubectl 1.26.1 from Canonical\u2713 installed\n
            • Now verify the kubectl version:

              sudo kubectl version -o yaml\n
            "},{"location":"other-tools/kubernetes/minikube/#install-the-container-runtime-ie-containerd-on-master-and-worker-nodes","title":"Install the container runtime i.e. containerd on master and worker nodes","text":"

            To run containers in Pods, Kubernetes uses a container runtime.

            By default, Kubernetes uses the Container Runtime Interface (CRI) to interface with your chosen container runtime.

            • Install container runtime - containerd

              The first thing to do is configure the persistent loading of the necessary containerd modules. Forwarding IPv4 and letting iptables see bridged traffic is done with the following command:

              cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf\noverlay\nbr_netfilter\nEOF\n\nsudo modprobe overlay\nsudo modprobe br_netfilter\n
            • Ensure net.bridge.bridge-nf-call-iptables is set to 1 in your sysctl config:

              # sysctl params required by setup, params persist across reboots\ncat <<EOF | sudo tee /etc/sysctl.d/k8s.conf\nnet.bridge.bridge-nf-call-iptables  = 1\nnet.bridge.bridge-nf-call-ip6tables = 1\nnet.ipv4.ip_forward                 = 1\nEOF\n
            • Apply sysctl params without reboot:

              sudo sysctl --system\n
            • Install the necessary dependencies with:

              sudo apt install -y curl gnupg2 software-properties-common apt-transport-https ca-certificates\n
            • The containerd.io packages in DEB and RPM formats are distributed by Docker. Add the required GPG key with:

              curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -\nsudo add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"\n

              It's now time to Install and configure containerd:

              sudo apt update -y\nsudo apt install -y containerd.io\ncontainerd config default | sudo tee /etc/containerd/config.toml\n\n# Reload the systemd daemon with\nsudo systemctl daemon-reload\n\n# Start containerd\nsudo systemctl restart containerd\nsudo systemctl enable --now containerd\n

              You can verify containerd is running with the command:

              sudo systemctl status containerd\n
            "},{"location":"other-tools/kubernetes/minikube/#installing-minikube","title":"Installing minikube","text":"
            • Install minikube

              curl -LO https://storage.googleapis.com/minikube/releases/latest/minikube_latest_amd64.deb\nsudo dpkg -i minikube_latest_amd64.deb\n

              OR, install minikube using wget:

              wget https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64\ncp minikube-linux-amd64 /usr/bin/minikube\nchmod +x /usr/bin/minikube\n
            • Verify the Minikube installation:

              minikube version\n\nminikube version: v1.29.0\ncommit: ddac20b4b34a9c8c857fc602203b6ba2679794d3\n
            • Install conntrack:

              Kubernetes 1.26.1 requires conntrack to be installed in root's path:

              sudo apt-get install -y conntrack\n
            • Start minikube:

              As we already stated in the beginning, we will be using Docker as the base for minikube, so start minikube with the docker driver:

              minikube start --driver=docker --container-runtime=containerd\n

              Note

              • To check the internal IP, run the minikube ip command.

              • By default, Minikube uses the driver most relevant to the host OS. To use a different driver, set the --driver flag in minikube start. For example, to use a different driver such as none instead of Docker, run minikube start --driver=none. To persist the configuration so that you can run minikube start without explicitly passing the --vm-driver docker flag each time (i.e. set it in global scope), run: minikube config set vm-driver docker.

              • Other start options: minikube start --force --driver=docker --network-plugin=cni --container-runtime=containerd

              • In case you want to start minikube with customize resources and want installer to automatically select the driver then you can run following command, minikube start --addons=ingress --cpus=2 --cni=flannel --install-addons=true --kubernetes-version=stable --memory=6g

              Output would like below:

              Perfect, the above confirms that the minikube cluster has been configured and started successfully.

            • Run below minikube command to check status:

              minikube status\n\nminikube\ntype: Control Plane\nhost: Running\nkubelet: Running\napiserver: Running\nkubeconfig: Configured\n
            • Run following kubectl command to verify the cluster info and node status:

              kubectl cluster-info\n\nKubernetes control plane is running at https://192.168.0.62:8443\nCoreDNS is running at https://192.168.0.62:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n
              kubectl get nodes\n\nNAME       STATUS   ROLES                  AGE   VERSION\nminikube   Ready    control-plane,master   5m    v1.26.1\n
            • To see the kubectl configuration use the command:

              kubectl config view\n

              The output looks like:

            • Get minikube addon details:

              minikube addons list\n

              The output will display like below:

              If you wish to enable any addons run the below minikube command,

              minikube addons enable <addon-name>\n
            • Enable minikube dashboard addon:

              minikube dashboard\n\n\ud83d\udd0c  Enabling dashboard ...\n    \u25aa Using image kubernetesui/metrics-scraper:v1.0.7\n    \u25aa Using image kubernetesui/dashboard:v2.3.1\n\ud83e\udd14  Verifying dashboard health ...\n\ud83d\ude80  Launching proxy ...\n\ud83e\udd14  Verifying proxy health ...\nhttp://127.0.0.1:40783/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/\n
            • To view minikube dashboard url:

              minikube dashboard --url\n\n\ud83e\udd14  Verifying dashboard health ...\n\ud83d\ude80  Launching proxy ...\n\ud83e\udd14  Verifying proxy health ...\nhttp://127.0.0.1:42669/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/\n
            • Expose Dashboard on NodePort instead of ClusterIP:

              -- Check the current port for kubernetes-dashboard:

              kubectl get services -n kubernetes-dashboard\n

              The output looks like below:

              kubectl edit service kubernetes-dashboard -n kubernetes-dashboard\n

              -- Replace type: \"ClusterIP\" with \"NodePort\" (a non-interactive alternative is shown at the end of this section):

              -- After saving the file, test again: kubectl get services -n kubernetes-dashboard

              Now the output should look like below:

              So now you can browse the K8s Dashboard: visit http://<Floating-IP>:<NodePort>, i.e. http://140.247.152.235:31881, to view the Dashboard.
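
              If you prefer a non-interactive change instead of kubectl edit, a hedged alternative with the same effect (assuming the default kubernetes-dashboard service name and namespace) is kubectl patch:

              kubectl -n kubernetes-dashboard patch svc kubernetes-dashboard -p '{\"spec\": {\"type\": \"NodePort\"}}'\nkubectl get services -n kubernetes-dashboard\n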

            "},{"location":"other-tools/kubernetes/minikube/#deploy-a-sample-nginx-application","title":"Deploy A Sample Nginx Application","text":"
            • Create a deployment, in this case Nginx:

              A Kubernetes Pod is a group of one or more Containers, tied together for the purposes of administration and networking. The Pod in this tutorial has only one Container. A Kubernetes Deployment checks on the health of your Pod and restarts the Pod's Container if it terminates. Deployments are the recommended way to manage the creation and scaling of Pods.

            • Let's check if the Kubernetes cluster is up and running:

              kubectl get all --all-namespaces\nkubectl get po -A\nkubectl get nodes\n
              kubectl create deployment --image nginx my-nginx\n
            • To access the deployment we will need to expose it:

              kubectl expose deployment my-nginx --port=80 --type=NodePort\n

              To check which NodePort is opened and is serving the Nginx deployment, run:

              kubectl get svc\n

              The output will show:

              OR,

              minikube service list\n\n|----------------------|---------------------------|--------------|-------------|\n|      NAMESPACE       |           NAME            | TARGET PORT  |       URL   |\n|----------------------|---------------------------|--------------|-------------|\n| default              | kubernetes                | No node port |\n| default              | my-nginx                  |           80 | http:.:31081|\n| kube-system          | kube-dns                  | No node port |\n| kubernetes-dashboard | dashboard-metrics-scraper | No node port |\n| kubernetes-dashboard | kubernetes-dashboard      |           80 | http:.:31929|\n|----------------------|---------------------------|--------------|-------------|\n

              OR,

              kubectl get svc my-nginx\nminikube service my-nginx --url\n

              Once the deployment is up, you should be able to access the Nginx home page on the allocated NodePort from the node's Floating IP.

              Go to browser, visit http://<Floating-IP>:<NodePort> i.e. http://140.247.152.235:31081/ to check the nginx default page.

              For your example,

            "},{"location":"other-tools/kubernetes/minikube/#deploy-a-hello-minikube-application","title":"Deploy A Hello Minikube Application","text":"
            • Use the kubectl create command to create a Deployment that manages a Pod. The Pod runs a Container based on the provided Docker image.

              kubectl create deployment hello-minikube --image=k8s.gcr.io/echoserver:1.4\nkubectl expose deployment hello-minikube --type=NodePort --port=8080\n
            • View the port information:

              kubectl get svc hello-minikube\nminikube service hello-minikube --url\n

              Go to browser, visit http://<Floating-IP>:<NodePort> i.e. http://140.247.152.235:31293/ to check the hello minikube default page.

              For your example,

            "},{"location":"other-tools/kubernetes/minikube/#clean-up","title":"Clean up","text":"

            Now you can clean up the app resources you created in your cluster:

            kubectl delete service my-nginx\nkubectl delete deployment my-nginx\n\nkubectl delete service hello-minikube\nkubectl delete deployment hello-minikube\n
            "},{"location":"other-tools/kubernetes/minikube/#managing-minikube-cluster","title":"Managing Minikube Cluster","text":"
            • To stop the minikube, run

              minikube stop\n
            • To delete the single node cluster:

              minikube delete\n
            • To Start the minikube, run

              minikube start\n
            • Remove the Minikube configuration and data directories:

              rm -rf ~/.minikube\nrm -rf ~/.kube\n
            • If you have installed any Minikube related packages, remove them:

              sudo apt remove -y conntrack\n
            • In case you want to start minikube with higher resources, like 8 GB RAM and 4 CPUs, execute the following commands one after another.

              minikube config set cpus 4\nminikube config set memory 8192\nminikube delete\nminikube start\n
            "},{"location":"other-tools/kubernetes/k3s/k3s-ha-cluster-using-k3d/","title":"Set up K3s in High Availability using k3d","text":"

            First, Kubernetes HA has two possible setups: embedded or external database (DB). We\u2019ll use the embedded DB in this HA K3s cluster setup, for which etcd is the default embedded DB.

            There are some strongly recommended Kubernetes HA best practices, and there is also an Automated HA master deployment doc.

            "},{"location":"other-tools/kubernetes/k3s/k3s-ha-cluster-using-k3d/#pre-requisite","title":"Pre-requisite","text":"

            Make sure you have already installed k3d following this.

            "},{"location":"other-tools/kubernetes/k3s/k3s-ha-cluster-using-k3d/#ha-cluster-with-at-least-three-control-plane-nodes","title":"HA cluster with at least three control plane nodes","text":"
            k3d cluster create --servers 3 --image rancher/k3s:latest\n

            Here, --servers 3 requests three nodes to be created with the role server, and --image rancher/k3s:latest specifies the K3s image to be used; here we are using latest.

            • Switch context to the new cluster:

              kubectl config use-context k3d-k3s-default\n

              You can now check what has been created from the different points of view:

              kubectl get nodes --output wide\n

              The output will look like:

              kubectl get pods --all-namespaces --output wide\n

              OR,

              kubectl get pods -A -o wide\n

              The output will look like:

            "},{"location":"other-tools/kubernetes/k3s/k3s-ha-cluster-using-k3d/#scale-up-the-cluster","title":"Scale up the cluster","text":"

            You can quickly simulate the addition of another control plane node to the HA cluster:

            k3d node create extraCPnode --role=server --image=rancher/k3s:latest\n\nINFO[0000] Adding 1 node(s) to the runtime local cluster 'k3s-default'...\nINFO[0000] Starting Node 'k3d-extraCPnode-0'\nINFO[0018] Updating loadbalancer config to include new server node(s)\nINFO[0018] Successfully configured loadbalancer k3d-k3s-default-serverlb!\nINFO[0019] Successfully created 1 node(s)!\n

            Here, extraCPnode specifies the name for the node, --role=server sets the role for the node to be a control plane/server, and --image rancher/k3s:latest specifies the K3s image to be used; here we are using latest.

            kubectl get nodes\n\nNAME                       STATUS   ROLES         AGE   VERSION\nk3d-extracpnode-0          Ready    etcd,master   31m   v1.19.3+k3s2\nk3d-k3s-default-server-0   Ready    etcd,master   47m   v1.19.3+k3s2\nk3d-k3s-default-server-1   Ready    etcd,master   47m   v1.19.3+k3s2\nk3d-k3s-default-server-2   Ready    etcd,master   47m   v1.19.3+k3s2\n

            OR,

            kubectl get nodes --output wide\n

            The output looks like below:

            "},{"location":"other-tools/kubernetes/k3s/k3s-ha-cluster-using-k3d/#heavy-armored-against-crashes","title":"Heavy Armored against crashes","text":"

            As we are working with containers, the best way to \"crash\" a node is to literally stop the container:

            docker stop k3d-k3s-default-server-0\n

            Note

            The Docker and k3d commands will show the state change immediately. However, the Kubernetes (read: K8s or K3s) cluster needs a short time to see the state change to NotReady.

            kubectl get nodes\n\nNAME                       STATUS     ROLES         AGE   VERSION\nk3d-extracpnode-0          Ready      etcd,master   32m   v1.19.3+k3s2\nk3d-k3s-default-server-0   NotReady   etcd,master   48m   v1.19.3+k3s2\nk3d-k3s-default-server-1   Ready      etcd,master   48m   v1.19.3+k3s2\nk3d-k3s-default-server-2   Ready      etcd,master   48m   v1.19.3+k3s2\n

            Now is a good time to look again at the load balancer k3d uses and at how critical it is in allowing us to continue accessing the K3s cluster.

            While the load balancer internally switched to the next available node, from an external connectivity point of view we still use the same IP/host. This abstraction saves us quite some effort and it's one of the most useful features of k3d.
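
            To see this from the Docker side, you can confirm that the load balancer container is still up while server-0 is stopped (a minimal check, assuming the default cluster name k3s-default):

            docker ps --filter name=k3d-k3s-default-serverlb\n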

            Let\u2019s look at the state of the cluster:

            kubectl get all --all-namespaces\n

            The output looks like below:

            Everything looks right. If we look at the pods more specifically, we will see that K3s automatically self-healed by recreating the pods that were running on the failed node on other nodes:

            kubectl get pods --all-namespaces --output wide\n

            As can be seen in the output:

            Finally, to show the power of HA and how K3s manages it, let's restart node 0 and see it being re-included into the cluster as if nothing happened:

            docker start k3d-k3s-default-server-0\n

            Our cluster is stable, and all the nodes are fully operational again as shown below:

            "},{"location":"other-tools/kubernetes/k3s/k3s-ha-cluster-using-k3d/#cleaning-the-resources","title":"Cleaning the resources","text":"
            k3d cluster delete\n
            "},{"location":"other-tools/kubernetes/k3s/k3s-ha-cluster/","title":"K3s with High Availability setup","text":"

            First, Kubernetes HA has two possible setups: embedded or external database (DB). We'll use the external DB in this HA K3s cluster setup, for which MySQL is the external DB, as shown here:

            In the diagram above, both the user running kubectl and each of the two agents connect to the TCP Load Balancer. The Load Balancer uses a list of private IP addresses to balance the traffic between the three servers. If one of the servers crashes, it is removed from the list of IP addresses.

            The servers use the SQL data store to synchronize the cluster\u2019s state.

            "},{"location":"other-tools/kubernetes/k3s/k3s-ha-cluster/#requirements","title":"Requirements","text":"

            i. Managed TCP Load Balancer

            ii. Managed MySQL service

            iii. Three VMs to run as K3s servers

            iv. Two VMs to run as K3s agents

            There are some strongly recommended Kubernetes HA best practices, and there is also an Automated HA master deployment doc.

            "},{"location":"other-tools/kubernetes/k3s/k3s-ha-cluster/#managed-tcp-load-balancer","title":"Managed TCP Load Balancer","text":"

            Create a load balancer using nginx: The nginx.conf located at /etc/nginx/nginx.conf contains an upstream block that points to the 3 K3s Servers on port 6443, as shown below:

            events {}\n...\n\nstream {\n  upstream k3s_servers {\n    server <k3s_server1-Internal-IP>:6443;\n    server <k3s_server2-Internal-IP>:6443;\n    server <k3s_server3-Internal-IP>:6443;\n  }\n\n  server {\n    listen 6443;\n    proxy_pass k3s_servers;\n  }\n}\n
            "},{"location":"other-tools/kubernetes/k3s/k3s-ha-cluster/#managed-mysql-service","title":"Managed MySQL service","text":"

            Create a MySQL database server with a new database, and create a new MySQL user and password with permission granted to read/write the new database. In this example, you can create:

            database name: <YOUR_DB_NAME> database user: <YOUR_DB_USER_NAME> database password: <YOUR_DB_USER_PASSWORD>
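
            As an illustrative sketch only (run in a MySQL shell on the database server, replacing the placeholders with your own values), the database, user, and permissions could be created like this:

            CREATE DATABASE <YOUR_DB_NAME>;\nCREATE USER '<YOUR_DB_USER_NAME>'@'%' IDENTIFIED BY '<YOUR_DB_USER_PASSWORD>';\nGRANT ALL PRIVILEGES ON <YOUR_DB_NAME>.* TO '<YOUR_DB_USER_NAME>'@'%';\nFLUSH PRIVILEGES;\n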

            "},{"location":"other-tools/kubernetes/k3s/k3s-ha-cluster/#three-vms-to-run-as-k3s-servers","title":"Three VMs to run as K3s servers","text":"

            Create 3 K3s Master VMs and perform the following steps on each of them:

            i. Export the datastore endpoint:

            export K3S_DATASTORE_ENDPOINT='mysql://<YOUR_DB_USER_NAME>:<YOUR_DB_USER_PASSWORD>@tcp(<MySQL-Server-Internal-IP>:3306)/<YOUR_DB_NAME>'\n

            ii. Install K3s with a node taint so that no pods are scheduled on this server (the opposite of affinity) unless they are critical addons, and with tls-san setting <Loadbalancer-Internal-IP> as an alternative name for the TLS certificate.

            curl -sfL https://get.k3s.io | sh -s - server \\\n    --node-taint CriticalAddonsOnly=true:NoExecute \\\n    --tls-san <Loadbalancer-Internal-IP_or_Hostname>\n
            • Verify all master nodes are visible to one another:

              sudo k3s kubectl get node\n
            • Generate a token from one of the K3s Master VMs: You need to extract a token from the master that will be used to join the nodes to the control plane, by running the following command on one of the K3s master nodes:

              sudo cat /var/lib/rancher/k3s/server/node-token\n

              You will then obtain a token that looks like:

              K1097aace305b0c1077fc854547f34a598d23330ff047ddeed8beb3c428b38a1ca7::server:6cc9fbb6c5c9de96f37fb14b5535c778\n
            "},{"location":"other-tools/kubernetes/k3s/k3s-ha-cluster/#two-vms-to-run-as-k3s-agents","title":"Two VMs to run as K3s agents","text":"

            Set K3S_URL to point to the Loadbalancer's internal IP and set K3S_TOKEN to the token obtained from the master, on both of the agent nodes:

            curl -sfL https://get.k3s.io | K3S_URL=https://<Loadbalancer-Internal-IP_or_Hostname>:6443 \\\n    K3S_TOKEN=<Token_From_Master> sh -\n

            Once both Agents are running, if you run the following command on the Master Server, you can see all nodes:

            sudo k3s kubectl get node\n
            "},{"location":"other-tools/kubernetes/k3s/k3s-ha-cluster/#simulate-a-failure","title":"Simulate a failure","text":"

            To simulate a failure, stop the K3s service on one or more of the K3s servers manually, then run the kubectl get nodes command:

            sudo systemctl stop k3s\n

            The third server will take over at this point.

            • To restart servers manually:

              sudo systemctl restart k3s\n
            "},{"location":"other-tools/kubernetes/k3s/k3s-ha-cluster/#on-your-local-development-machine-to-access-kubernetes-cluster-remotely-optional","title":"On your local development machine to access Kubernetes Cluster Remotely (Optional)","text":"

            Important Requirement

            Your local development machine must have kubectl installed.

            • Copy the Kubernetes config to your local machine: Copy the content of the kubeconfig file located on the K3s master node at /etc/rancher/k3s/k3s.yaml to your local machine's ~/.kube/config file. Before saving, change the cluster server address from 127.0.0.1 to <Loadbalancer-Internal-IP> (a scripted sketch of this follows the command below). This will allow your local machine to see the cluster nodes:

              kubectl get nodes\n
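
              A minimal sketch of that copy step, run from your local machine (the ubuntu user and the <K3s-Master-Floating-IP> placeholder are assumptions for illustration, and the k3s.yaml file may first need to be made readable by your SSH user on the master):

              mkdir -p ~/.kube\nscp ubuntu@<K3s-Master-Floating-IP>:/etc/rancher/k3s/k3s.yaml ~/.kube/config\nsed -i 's/127.0.0.1/<Loadbalancer-Internal-IP>/' ~/.kube/config\n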
            "},{"location":"other-tools/kubernetes/k3s/k3s-ha-cluster/#kubernetes-dashboard","title":"Kubernetes Dashboard","text":"

            The Kubernetes Dashboard is a GUI tool to help you work more efficiently with your K8s cluster. By default it is only accessible from within the cluster (at least not without some serious tweaking).

            Check the releases page for the command to use for installation:

            kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml\n
            • Dashboard RBAC Configuration:

              dashboard.admin-user.yml

              apiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: admin-user\n  namespace: kubernetes-dashboard\n

              dashboard.admin-user-role.yml

              apiVersion: rbac.authorization.k8s.io/v1\nkind: ClusterRoleBinding\nmetadata:\n  name: admin-user\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: ClusterRole\n  name: cluster-admin\nsubjects:\n- kind: ServiceAccount\n  name: admin-user\n  namespace: kubernetes-dashboard\n
            • Deploy the admin-user configuration:

              sudo k3s kubectl create -f dashboard.admin-user.yml -f dashboard.admin-user-role.yml\n

              Important Note

              If you're doing this from your local development machine, remove sudo k3s and just use kubectl.

            • Get bearer token

              sudo k3s kubectl -n kubernetes-dashboard describe secret admin-user-token \\\n  | grep ^token\n
            • Start dashboard locally:

              sudo k3s kubectl proxy\n

              Then you can sign in at this URL using the token you got in the previous step:

              http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/\n
            "},{"location":"other-tools/kubernetes/k3s/k3s-ha-cluster/#deploying-nginx-using-deployment","title":"Deploying Nginx using deployment","text":"
            • Create a deployment nginx.yaml:

              vi nginx.yaml\n
            • Copy and paste the following content in nginx.yaml:

              apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mysite\n  labels:\n    app: mysite\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mysite\n  template:\n    metadata:\n      labels:\n        app : mysite\n    spec:\n      containers:\n        - name : mysite\n          image: nginx\n          ports:\n            - containerPort: 80\n
              sudo k3s kubectl apply -f nginx.yaml\n
            • Verify the nginx pod is in Running state:

              sudo k3s kubectl get pods --all-namespaces\n

              OR,

              kubectl get pods --all-namespaces --output wide\n

              OR,

              kubectl get pods -A -o wide\n
            • Scale the pods to available agents:

              sudo k3s kubectl scale --replicas=2 deploy/mysite\n
            • View all deployment status:

              sudo k3s kubectl get deploy mysite\n\nNAME     READY   UP-TO-DATE   AVAILABLE   AGE\nmysite   2/2     2            2           85s\n
            • Delete the nginx deployment and pod:

              sudo k3s kubectl delete -f nginx.yaml\n

              OR,

              sudo k3s kubectl delete deploy mysite\n
            "},{"location":"other-tools/kubernetes/k3s/k3s-using-k3d/","title":"Setup K3s cluster Using k3d","text":"

            One of the most popular methods (and the second one covered here) of creating a K3s cluster is by using k3d. As the name suggests, k3d (K3s-in-docker) is a wrapper around K3s (Lightweight Kubernetes) that runs it in Docker. Please refer to this link to get a brief overview of this tool. It provides a seamless experience for K3s cluster management with some straightforward commands, and it can create and manage both single-node K3s clusters and K3s High Availability clusters with just a few commands.

            Note

            To use k3d you must have Docker installed on your system.

            "},{"location":"other-tools/kubernetes/k3s/k3s-using-k3d/#install-docker","title":"Install Docker","text":"
            • Install container runtime - docker

              apt-get install docker.io -y\n
            • Configure the Docker daemon, in particular to use systemd for the management of the container\u2019s cgroups

              cat <<EOF | sudo tee /etc/docker/daemon.json\n{\n\"exec-opts\": [\"native.cgroupdriver=systemd\"]\n}\nEOF\n\nsystemctl enable --now docker\nusermod -aG docker ubuntu\nsystemctl daemon-reload\nsystemctl restart docker\n
            "},{"location":"other-tools/kubernetes/k3s/k3s-using-k3d/#install-kubectl","title":"Install kubectl","text":"
            • Install kubectl binary

              kubectl: the command line util to talk to your cluster.

              snap install kubectl --classic\n

              This outputs:

              kubectl 1.26.1 from Canonical\u2713 installed\n
            • Now verify the kubectl version:

              kubectl version -o yaml\n
            "},{"location":"other-tools/kubernetes/k3s/k3s-using-k3d/#installing-k3d","title":"Installing k3d","text":""},{"location":"other-tools/kubernetes/k3s/k3s-using-k3d/#k3d-installation","title":"k3d Installation","text":"

            The command below will install k3d on your system using the installation script.

            wget -q -O - https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash\n

            OR,

            curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash\n

            To verify the installation, please run the following command:

            k3d version\n\nk3d version v5.0.0\nk3s version v1.21.5-k3s1 (default)\n

            After the successful installation, you are ready to create your cluster using k3d and run K3s in docker within seconds.

            "},{"location":"other-tools/kubernetes/k3s/k3s-using-k3d/#getting-started","title":"Getting Started","text":"

            Now let's directly jump into creating our K3s cluster using k3d.

            1. Create k3d Cluster:

              k3d cluster create k3d-demo-cluster\n

              This single command spawns a K3s cluster with two containers: a Kubernetes control-plane node (server) and a load balancer (serverlb) in front of it. It puts both of them in a dedicated Docker network and exposes the Kubernetes API on a randomly chosen free port on the Docker host. It also creates a named Docker volume in the background as a preparation for image imports.

              You can also use more advanced syntax for cluster creation:

              k3d cluster create mycluster --api-port 127.0.0.1:6445 --servers 3 \\\n    --agents 2 --volume '/home/me/mycode:/code@agent[*]' --port '8080:80@loadbalancer'\n

              Here, the above single command spawns a K3s cluster with six containers:

              • load balancer

              • 3 servers (control-plane nodes)

              • 2 agents (formerly worker nodes)

              With the --api-port 127.0.0.1:6445, you tell k3d to map the Kubernetes API Port (6443 internally) to 127.0.0.1/localhost\u2019s port 6445. That means that you will have this connection string in your Kubeconfig: server: https://127.0.0.1:6445 to connect to this cluster.

              This port will be mapped from the load balancer to your host system. From there, requests will be proxied to your server nodes, effectively simulating a production setup, where server nodes also can go down and you would want to failover to another server.

              The --volume /home/me/mycode:/code@agent[*] bind mounts your local directory /home/me/mycode to the path /code inside all ([*]) of your agent nodes. Replace * with an index (here: 0 or 1) to only mount it into one of them.

              The specification telling k3d which nodes it should mount the volume to is called \"node filter\" and it\u2019s also used for other flags, like the --port flag for port mappings.

              That said, --port '8080:80@loadbalancer' maps your local host's port 8080 to port 80 on the load balancer (serverlb), which can be used to forward HTTP ingress traffic to your cluster. For example, you can now deploy a web app into the cluster (Deployment), which is exposed (Service) externally via an Ingress such as myapp.k3d.localhost (a minimal example manifest is sketched at the end of this step).

              Then (provided that everything is set up to resolve that domain to your localhost IP), you can point your browser to http://myapp.k3d.localhost:8080 to access your app. Traffic then flows from your host through the Docker bridge interface to the load balancer. From there, it\u2019s proxied to the cluster, where it passes via Ingress and Service to your application Pod.

              Note

              You have to have some mechanism set up to resolve myapp.k3d.localhost to your local host IP (127.0.0.1). The most common way is using entries of the form 127.0.0.1 myapp.k3d.localhost in your /etc/hosts file (C:\Windows\System32\drivers\etc\hosts on Windows). However, this does not allow for wildcard entries (*.localhost), which may become a bit cumbersome after a while, so you may want to have a look at tools like dnsmasq (MacOS/UNIX) or Acrylic (Windows) to ease the burden.
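
              As an illustrative sketch (not taken from the k3d docs), a minimal Deployment, Service, and Ingress for the myapp.k3d.localhost example could look like the manifest below; the name myapp and the nginx image are arbitrary choices, and the bundled traefik ingress controller in K3s routes the traffic:

              apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: myapp\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: myapp\n  template:\n    metadata:\n      labels:\n        app: myapp\n    spec:\n      containers:\n        - name: myapp\n          image: nginx\n          ports:\n            - containerPort: 80\n---\napiVersion: v1\nkind: Service\nmetadata:\n  name: myapp\nspec:\n  selector:\n    app: myapp\n  ports:\n    - port: 80\n      targetPort: 80\n---\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  name: myapp\nspec:\n  rules:\n    - host: myapp.k3d.localhost\n      http:\n        paths:\n          - path: /\n            pathType: Prefix\n            backend:\n              service:\n                name: myapp\n                port:\n                  number: 80\n

              After applying it with kubectl apply -f myapp.yaml and adding the hosts entry described above, http://myapp.k3d.localhost:8080 should reach the pod through the load balancer.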

            2. Getting the cluster\u2019s kubeconfig: Get the new cluster\u2019s connection details merged into your default kubeconfig (usually specified using the KUBECONFIG environment variable or the default path $HOME/.kube/config) and directly switch to the new context:

              k3d kubeconfig merge k3d-demo-cluster --kubeconfig-switch-context\n

              This outputs:

              /root/.k3d/kubeconfig-k3d-demo-cluster.yaml\n
            3. Checking the nodes running on k3d cluster:

              k3d node list\n

              You can see two nodes here. The (very) smart implementation is that while the cluster is running on its node k3d-k3d-demo-cluster-server-0, there is another \"node\" that acts as the load balancer, i.e. k3d-k3d-demo-cluster-serverlb.

            4. Firing kubectl commands, which allow you to run commands against Kubernetes:

              i. The below command will list the nodes available in our cluster:

              kubectl get nodes -o wide\n

              OR,

              kubectl get nodes --output wide\n

              The output will look like:

              ii. To look at what\u2019s inside the K3s cluster (pods, services, deployments, etc.):

              kubectl get all --all-namespaces\n

              The output will look like:

              We can see that, in addition to the Kubernetes service, K3s deploys DNS, metrics and ingress (traefik) services when you use the defaults.

              iii. List the active k3d clusters:

              k3d cluster list\n

              iv. Check the cluster connectivity:

              kubectl cluster-info\n

              To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.

            5. Check the active containers:

              docker ps\n

            Now, as you can observe, the cluster is up and running and you can play around with it: you can create and deploy your applications on the cluster, as sketched below.
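
            For example, a quick throwaway smoke test (an nginx deployment that you can delete right away):

            kubectl create deployment nginx --image=nginx\nkubectl get pods\nkubectl delete deployment nginx\n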

            "},{"location":"other-tools/kubernetes/k3s/k3s-using-k3d/#deleting-cluster","title":"Deleting Cluster","text":"
            k3d cluster delete k3d-demo-cluster\n\nINFO[0000] Deleting cluster 'k3d-demo-cluster'\nINFO[0000] Deleted k3d-k3d-demo-cluster-serverlb\nINFO[0001] Deleted k3d-k3d-demo-cluster-server-0\nINFO[0001] Deleting cluster network 'k3d-k3d-demo-cluster'\nINFO[0001] Deleting image volume 'k3d-k3d-demo-cluster-images'\nINFO[0001] Removing cluster details from default kubeconfig...\nINFO[0001] Removing standalone kubeconfig file (if there is one)...\nINFO[0001] Successfully deleted cluster k3d-demo-cluster!\n

            You can also create a k3d High Availability cluster and add as many nodes as you want within seconds.

            "},{"location":"other-tools/kubernetes/k3s/k3s-using-k3sup/","title":"K3s cluster setup using k3sup","text":"

            k3sup (pronounced ketchup) is a popular open source tool to install K3s over SSH.

            • Bootstrap the cluster

            The two most important commands in k3sup are:

            i. install: install K3s to a new server and create a join token for the cluster

            ii. join: fetch the join token from a server, then use it to install K3s to an agent

            "},{"location":"other-tools/kubernetes/k3s/k3s-using-k3sup/#download-k3sup","title":"Download k3sup","text":"
            curl -sLS https://get.k3sup.dev | sh\nsudo install k3sup /usr/bin/\n\nk3sup --help\n
            • Other options for install (a combined example is sketched after this list):

              --cluster - start this server in clustering mode using embedded etcd (embedded HA)

              --skip-install - if you already have k3s installed, you can just run this command to get the kubeconfig

              --ssh-key - specify a specific path for the SSH key for remote login

              --local-path - default is ./kubeconfig - set the file where you want to save your cluster's kubeconfig. By default this file will be overwritten.

              --merge - Merge config into existing file instead of overwriting (e.g. to add config to the default kubectl config, use --local-path ~/.kube/config --merge).

              --context - default is default - set the name of the kubeconfig context.

              --ssh-port - default is 22, but you can specify an alternative port i.e. 2222

              --k3s-extra-args - Optional extra arguments to pass to the k3s installer, wrapped in quotes, i.e. --k3s-extra-args '--no-deploy traefik' or --k3s-extra-args '--docker'. For multiple args, combine them within single quotes: --k3s-extra-args '--no-deploy traefik --docker'.

              --k3s-version - set the specific version of k3s, i.e. v0.9.1

              --ipsec - Enforces the optional extra argument for k3s: --flannel-backend option: ipsec

              --print-command - Prints out the command, sent over SSH to the remote computer

              --datastore - used to pass a SQL connection-string to the --datastore-endpoint flag of k3s.

              See even more install options by running k3sup install --help.
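
              For illustration only, a combined invocation using several of these flags might look like the following sketch (the IP placeholder, SSH key path, pinned version, and context name are arbitrary example values):

              k3sup install --ip <Master-Internal-IP> --user ubuntu \\\n    --ssh-key ~/.ssh/id_rsa \\\n    --k3s-version v1.21.5+k3s2 \\\n    --merge --local-path ~/.kube/config --context k3s-nerc \\\n    --print-command\n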

            • On Master Node:

              export SERVER_IP=<Master-Internal-IP>\nexport USER=root\n\nk3sup install --ip $SERVER_IP --user $USER\n
            • On Agent Node:

              Next join one or more agents to the cluster:

              export AGENT_IP=<Agent-Internal-IP>\n\nexport SERVER_IP=<Master-Internal-IP>\nexport USER=root\n\nk3sup join --ip $AGENT_IP --server-ip $SERVER_IP --user $USER\n
            "},{"location":"other-tools/kubernetes/k3s/k3s-using-k3sup/#create-a-multi-master-ha-setup-with-external-sql","title":"Create a multi-master (HA) setup with external SQL","text":"
            export LB_IP='<Loadbalancer-Internal-IP_or_Hostname>'\nexport DATASTORE='mysql://<YOUR_DB_USER_NAME>:<YOUR_DB_USER_PASSWORD>@tcp(<MySQL-Server-Internal-IP>:3306)/<YOUR_DB_NAME>'\nexport CHANNEL=latest\n

            Before continuing, check that your environment variables are still populated from earlier, and if not, trace back and populate them. The commands below also assume that SERVER1, SERVER2, SERVER3, AGENT1 and AGENT2 are set to the internal IPs of the corresponding nodes.

            echo $LB_IP\necho $DATASTORE\necho $CHANNEL\n
            k3sup install --user root --ip $SERVER1 \\\n--k3s-channel $CHANNEL \\\n--print-command \\\n--datastore=\"${DATASTORE}\" \\\n--tls-san $LB_IP\n\nk3sup install --user root --ip $SERVER2 \\\n--k3s-channel $CHANNEL \\\n--print-command \\\n--datastore=\"${DATASTORE}\" \\\n--tls-san $LB_IP\n\nk3sup install --user root --ip $SERVER3 \\\n--k3s-channel $CHANNEL \\\n--print-command \\\n--datastore=\"${DATASTORE}\" \\\n--tls-san $LB_IP\n\nk3sup join --user root --server-ip $LB_IP --ip $AGENT1 \\\n--k3s-channel $CHANNEL \\\n--print-command\n\nk3sup join --user root --server-ip $LB_IP --ip $AGENT2 \\\n--k3s-channel $CHANNEL \\\n--print-command\n

            There will be a kubeconfig file created in the current working directory with the IP address of the LoadBalancer set for kubectl to use.

            • Check the nodes have joined:

              export KUBECONFIG=`pwd`/kubeconfig\nkubectl get node\n
            "},{"location":"other-tools/kubernetes/k3s/k3s/","title":"K3s","text":""},{"location":"other-tools/kubernetes/k3s/k3s/#features","title":"Features","text":"
            • Lightweight certified K8s distro

            • Built for production operations

            • 40MB binary, 250MB memory consumption

            • Single process w/ integrated K8s master, Kubelet, and containerd

            • Supports not only etcd to hold the cluster state, but also SQLite (for single-node, simpler setups) or external DBs like MySQL and PostgreSQL

            • Open source project

            "},{"location":"other-tools/kubernetes/k3s/k3s/#components-and-architecure","title":"Components and architecure","text":"
            • High-Availability K3s Server with an External DB:

              or,

              For this kind of high availability k3s setup read this.

            "},{"location":"other-tools/kubernetes/k3s/k3s/#pre-requisite","title":"Pre-requisite","text":"

            We will need 1 control-plane (master) node and 2 worker nodes to create a single control-plane Kubernetes cluster using K3s. We are using the following setup for this purpose:

            • 1 Linux machine for master, ubuntu-22.04-x86_64 or your choice of Ubuntu OS image, cpu-su.2 flavor with 2vCPU, 8GB RAM, 20GB storage - also assign Floating IP to the master node.

            • 2 Linux machines for worker, ubuntu-22.04-x86_64 or your choice of Ubuntu OS image, cpu-su.1 flavor with 1vCPU, 4GB RAM, 20GB storage.

            • ssh access to all machines: Read more here on how to set up SSH on your remote VMs.

            "},{"location":"other-tools/kubernetes/k3s/k3s/#networking","title":"Networking","text":"

            The K3s server needs port 6443 to be accessible by all nodes.

            The nodes need to be able to reach other nodes over UDP port 8472 when Flannel VXLAN overlay networking is used. The node should not listen on any other port. K3s uses reverse tunneling such that the nodes make outbound connections to the server and all kubelet traffic runs through that tunnel. However, if you do not use Flannel and provide your own custom CNI, then port 8472 is not needed by K3s.

            If you wish to utilize the metrics server, you will need to open port 10250 on each node.

            If you plan on achieving high availability with embedded etcd, server nodes must be accessible to each other on ports 2379 and 2380.
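
            As a sketch of how these rules might be created with the OpenStack CLI (assuming the openstack client is configured for your project and that 192.168.0.0/24 is your cluster's internal network; adjust the CIDR and group name to your environment):

            openstack security group create k3s-cluster\nopenstack security group rule create --protocol tcp --dst-port 6443 --remote-ip 192.168.0.0/24 k3s-cluster\nopenstack security group rule create --protocol udp --dst-port 8472 --remote-ip 192.168.0.0/24 k3s-cluster\nopenstack security group rule create --protocol tcp --dst-port 10250 --remote-ip 192.168.0.0/24 k3s-cluster\nopenstack security group rule create --protocol tcp --dst-port 2379:2380 --remote-ip 192.168.0.0/24 k3s-cluster\n

            Keeping the remote IP restricted to the internal network also addresses the VXLAN warning in the note below.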

            • Create 1 security group with appropriate Inbound Rules for K3s Server Nodes that will be used by all 3 nodes:

              Important Note

              The VXLAN overlay networking port on nodes should not be exposed to the world as it opens up your cluster network to be accessed by anyone. Run your nodes behind a firewall/security group that disables access to port 8472.

            • Set up a unique hostname on each machine using the following command:

              echo \"<node_internal_IP> <host_name>\" >> /etc/hosts\nhostnamectl set-hostname <host_name>\n

              For example:

              echo \"192.168.0.235 k3s-master\" >> /etc/hosts\nhostnamectl set-hostname k3s-master\n

            In this step, you will set up the following nodes:

            • k3s-master

            • k3s-worker1

            • k3s-worker2

            The below steps will be performed on all the above mentioned nodes:

            • SSH into all the 3 machines

            • Switch to the root user: sudo su

            • Update the repositories and packages:

              apt-get update && apt-get upgrade -y\n
            • Install curl and apt-transport-https

              apt-get update && apt-get install -y apt-transport-https curl\n
            "},{"location":"other-tools/kubernetes/k3s/k3s/#install-docker","title":"Install Docker","text":"
            • Install container runtime - docker

              apt-get install docker.io -y\n
            • Configure the Docker daemon, in particular to use systemd for the management of the container\u2019s cgroups

              cat <<EOF | sudo tee /etc/docker/daemon.json\n{\n\"exec-opts\": [\"native.cgroupdriver=systemd\"]\n}\nEOF\n\nsystemctl enable --now docker\nusermod -aG docker ubuntu\nsystemctl daemon-reload\nsystemctl restart docker\n
            "},{"location":"other-tools/kubernetes/k3s/k3s/#configure-k3s-to-bootstrap-the-cluster-on-master-node","title":"Configure K3s to bootstrap the cluster on master node","text":"

            Run the below command on the master node i.e. k3s-master that you want to setup as control plane.

            • SSH into k3s-master machine

            • Switch to root user: sudo su

            • Execute the below command to initialize the cluster:

              curl -sfL https://get.k3s.io | sh -s - --kubelet-arg 'cgroup-driver=systemd' \\\n--node-taint CriticalAddonsOnly=true:NoExecute --docker\n

              OR, if you want to set up the K3s cluster without using Docker as the container runtime, just run the command without supplying the --docker argument.

              curl -sfL https://get.k3s.io | sh -\n

            After running this installation:

            • The K3s service will be configured to automatically restart after node reboots or if the process crashes or is killed

            • Additional utilities will be installed, including kubectl, crictl, ctr, k3s-killall.sh, and k3s-uninstall.sh

            • A kubeconfig file will be written to /etc/rancher/k3s/k3s.yaml and the kubectl installed by K3s will automatically use it.

            To check if the service installed successfully, you can use:

            systemctl status k3s\n

            The output looks like:

            OR,

            k3s --version\nkubectl version\n

            Note

            If you want to taint the node, i.e. not deploy pods on this node after installation, then run: kubectl taint nodes <master_node_name> k3s-controlplane=true:NoExecute, i.e. kubectl taint nodes k3s-master k3s-controlplane=true:NoExecute

            You can check if the master node is working by:

            k3s kubectl get nodes\n\nNAME         STATUS   ROLES                  AGE   VERSION\nk3s-master   Ready    control-plane,master   37s   v1.21.5+k3s2\n
            kubectl config get-clusters\n\nNAME\ndefault\n
            kubectl cluster-info\n\nKubernetes control plane is running at https://127.0.0.1:6443\nCoreDNS is running at https://127.0.0.1:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy\nMetrics-server is running at https://127.0.0.1:6443/api/v1/namespaces/kube-system/services/https:metrics-server:/proxy\n\nTo further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.\n
            kubectl get namespaces\n\nNAME              STATUS   AGE\ndefault           Active   27m\nkube-system       Active   27m\nkube-public       Active   27m\nkube-node-lease   Active   27m\n
            kubectl get endpoints -n kube-system\n\nNAME                    ENDPOINTS                                  AGE\nkube-dns                10.42.0.4:53,10.42.0.4:53,10.42.0.4:9153   27m\nmetrics-server          10.42.0.3:443                              27m\nrancher.io-local-path   <none>                                     27m\n
            kubectl get pods -n kube-system\n\nNAME                                      READY   STATUS    RESTARTS   AGE\nhelm-install-traefik-crd-ql7j2            0/1     Pending   0          32m\nhelm-install-traefik-mr65j                0/1     Pending   0          32m\ncoredns-7448499f4d-x57z7                  1/1     Running   0          32m\nmetrics-server-86cbb8457f-cg2fs           1/1     Running   0          32m\nlocal-path-provisioner-5ff76fc89d-kdfcl   1/1     Running   0          32m\n

            You need to extract a token from the master that will be used to join the nodes to the master.

            On the master node:

            sudo cat /var/lib/rancher/k3s/server/node-token\n

            You will then obtain a token that looks like:

            K1097aace305b0c1077fc854547f34a598d2::server:6cc9fbb6c5c9de96f37fb14b8\n
            "},{"location":"other-tools/kubernetes/k3s/k3s/#configure-k3s-on-worker-nodes-to-join-the-cluster","title":"Configure K3s on worker nodes to join the cluster","text":"

            Run the below command on both of the worker nodes i.e. k3s-worker1 and k3s-worker2 that you want to join the cluster.

            • SSH into the k3s-worker1 and k3s-worker2 machines

            • Switch to root user: sudo su

            • Execute the below command to join the cluster using the token obtained from the master node:

              To install K3s on worker nodes and add them to the cluster, run the installation script with the K3S_URL and K3S_TOKEN environment variables. Here is an example showing how to join a worker node:

              curl -sfL https://get.k3s.io | K3S_URL=https://<Master-Internal-IP>:6443 \\\nK3S_TOKEN=<Join_Token> sh -\n

              Where <Master-Internal-IP> is the Internal IP of the master node and <Join_Token> is the token obtained from the master node.

              For example:

              curl -sfL https://get.k3s.io | K3S_URL=https://192.168.0.154:6443 \\\nK3S_TOKEN=K1019827f88b77cc5e1dce04d692d445c1015a578dafdc56aca829b2f501df9359a::server:1bf0d61c85c6dac6d5a0081da55f44ba sh -\n

              You can verify if the k3s-agent on both of the worker nodes is running by:

              systemctl status k3s-agent\n

              The output looks like:

            To verify that our nodes have successfully been added to the cluster, run the following command on master node:

            k3s kubectl get nodes\n

            OR,

            k3s kubectl get nodes -o wide\n

            Your output should look like:

            k3s kubectl get nodes\n\nNAME          STATUS   ROLES                  AGE     VERSION\nk3s-worker1   Ready    <none>                 5m16s   v1.21.5+k3s2\nk3s-worker2   Ready    <none>                 5m5s    v1.21.5+k3s2\nk3s-master    Ready    control-plane,master   9m33s   v1.21.5+k3s2\n

            This shows that we have successfully set up our K3s cluster, which is ready for us to deploy applications to it.

            "},{"location":"other-tools/kubernetes/k3s/k3s/#deploying-nginx-using-deployment","title":"Deploying Nginx using deployment","text":"
            • Create a deployment nginx.yaml on master node

              vi nginx.yaml\n

              The nginx.yaml looks like this:

              apiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: mysite\n  labels:\n    app: mysite\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: mysite\n  template:\n    metadata:\n      labels:\n        app : mysite\n    spec:\n      containers:\n        - name : mysite\n          image: nginx\n          ports:\n            - containerPort: 80\n
              kubectl apply -f nginx.yaml\n
            • Verify the nginx pod is in Running state:

              sudo k3s kubectl get pods --all-namespaces\n
            • Scale the pods to available agents:

              sudo k3s kubectl scale --replicas=2 deploy/mysite\n
            • View all deployment status:

              sudo k3s kubectl get deploy mysite\n\nNAME     READY   UP-TO-DATE   AVAILABLE   AGE\nmysite   2/2     2            2           85s\n
            • Delete the nginx deployment and pod:

              sudo k3s kubectl delete -f nginx.yaml\n

              OR,

              sudo k3s kubectl delete deploy mysite\n

              Note

              Instead of manually applying any new deployment YAML, you can just copy the YAML file to the /var/lib/rancher/k3s/server/manifests/ folder, i.e. sudo cp nginx.yaml /var/lib/rancher/k3s/server/manifests/. This will automatically deploy the newly copied deployment on your cluster.

            "},{"location":"other-tools/kubernetes/k3s/k3s/#deploy-addons-to-k3s","title":"Deploy Addons to K3s","text":"

            K3s is a lightweight kubernetes tool that doesn\u2019t come packaged with all the tools but you can install them separately.

            • Install Helm Commandline tool on K3s:

              i. Download the latest version of Helm commandline tool using wget from this page.

              wget https://get.helm.sh/helm-v3.7.0-linux-amd64.tar.gz\n

              ii. Unpack it:

              tar -zxvf helm-v3.7.0-linux-amd64.tar.gz\n

              iii. Find the helm binary in the unpacked directory, and move it to its desired destination

              mv linux-amd64/helm /usr/bin/helm\nchmod +x /usr/bin/helm\n

              OR,

              Using Snap:

              snap install helm --classic\n

              OR,

              Using Apt (Debian/Ubuntu):

              curl https://baltocdn.com/helm/signing.asc | sudo apt-key add -\nsudo apt-get install apt-transport-https --yes\necho \"deb https://baltocdn.com/helm/stable/debian/ all main\" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list\nsudo apt-get update\nsudo apt-get install helm\n
            • Verify the Helm installation:

              helm version\n\nversion.BuildInfo{Version:\"v3.7.0\", GitCommit:\"eeac83883cb4014fe60267ec63735\n70374ce770b\", GitTreeState:\"clean\", GoVersion:\"go1.16.8\"}\n
            • Add the helm chart repository to allow installation of applications using helm:

              helm repo add stable https://charts.helm.sh/stable\nhelm repo update\n
            "},{"location":"other-tools/kubernetes/k3s/k3s/#deploy-a-sample-nginx-application-using-helm","title":"Deploy A Sample Nginx Application using Helm","text":"

            Nginx can be used as a web proxy to expose ingress web traffic routes in and out of the cluster.

            • You can install \"nginx web-proxy\" using Helm:

              export KUBECONFIG=/etc/rancher/k3s/k3s.yaml\nhelm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx\nhelm repo list\nhelm repo update\nhelm install stable ingress-nginx/ingress-nginx --namespace kube-system \\\n    --set defaultBackend.enabled=false --set controller.publishService.enabled=true\n
            • We can test if the application has been installed by:

              k3s kubectl get pods -n kube-system -l app=nginx-ingress -o wide\n\nNAME   READY STATUS  RESTARTS AGE  IP        NODE    NOMINATED NODE  READINESS GATES\nnginx.. 1/1  Running 0        19m  10.42.1.5 k3s-worker1   <none>      <none>\n
            • We have successfully deployed the nginx web-proxy on K3s. Go to your browser and visit http://<Master-Floating-IP>, e.g. http://128.31.25.246, to check the nginx default page.

            "},{"location":"other-tools/kubernetes/k3s/k3s/#upgrade-k3s-using-the-installation-script","title":"Upgrade K3s Using the Installation Script","text":"

            To upgrade K3s from an older version you can re-run the installation script using the same flags, for example:

            curl -sfL https://get.k3s.io | sh -\n

            This will upgrade to a newer version in the stable channel by default.

            If you want to upgrade to a newer version in a specific channel (such as latest) you can specify the channel:

            curl -sfL https://get.k3s.io | INSTALL_K3S_CHANNEL=latest sh -\n

            If you want to upgrade to a specific version you can run the following command:

            curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION=vX.Y.Z-rc1 sh -\n

            From a non-root user's terminal, to install the latest version you do not need to pass INSTALL_K3S_VERSION; the latest version is loaded by default.

            curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC=\"--write-kubeconfig-mode 644\" \\\n    sh -\n

            Note

            For more about \"How to use flags and environment variables\", read this.

            "},{"location":"other-tools/kubernetes/k3s/k3s/#restarting-k3s","title":"Restarting K3s","text":"

            Restarting K3s is supported by the installation script for systemd and OpenRC.

            Using systemd:

            To restart servers manually:

            sudo systemctl restart k3s\n

            To restart agents manually:

            sudo systemctl restart k3s-agent\n

            Using OpenRC:

            To restart servers manually:

            sudo service k3s restart\n

            To restart agents manually:

            sudo service k3s-agent restart\n
            "},{"location":"other-tools/kubernetes/k3s/k3s/#uninstalling","title":"Uninstalling","text":"

            If you installed K3s with the help of the install.sh script, an uninstall script is generated during installation. The script is created on your master node at /usr/bin/k3s-uninstall.sh or as k3s-agent-uninstall.sh on your worker nodes.

            To remove K3s on the worker nodes, execute:

            sudo /usr/bin/k3s-agent-uninstall.sh\nsudo rm -rf /var/lib/rancher\n

            To remove k3s on the master node, execute:

            sudo /usr/bin/k3s-uninstall.sh\nsudo rm -rf /var/lib/rancher\n
            "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/","title":"Highly Available Kubernetes Cluster using kubeadm","text":""},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#objectives","title":"Objectives","text":"
            • Install a multi control-plane(master) Kubernetes cluster

            • Install a Pod network on the cluster so that your Pods can talk to each other

            • Deploy and test a sample app

            • Deploy K8s Dashboard to view all cluster's components

            "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#components-and-architecure","title":"Components and architecure","text":"

            This shows components and architecture of a highly-available, production-grade Kubernetes cluster.

            You can learn about each component from Kubernetes Components.

            "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#pre-requisite","title":"Pre-requisite","text":"

            You will need 2 control-plane (master) nodes and 2 worker nodes to create a multi-master Kubernetes cluster using kubeadm. You are going to use the following setup for this purpose:

            • 2 Linux machines for master, ubuntu-20.04-x86_64 or your choice of Ubuntu OS image, cpu-su.2 flavor with 2vCPU, 8GB RAM, 20GB storage.

            • 2 Linux machines for worker, ubuntu-20.04-x86_64 or your choice of Ubuntu OS image, cpu-su.1 flavor with 1vCPU, 4GB RAM, 20GB storage - also assign Floating IPs to both of the worker nodes.

            • 1 Linux machine for loadbalancer, ubuntu-20.04-x86_64 or your choice of Ubuntu OS image, cpu-su.1 flavor with 1vCPU, 4GB RAM, 20GB storage.

            • ssh access to all machines: Read more here on how to setup SSH to your remote VMs.

            • Create 2 security groups with appropriate ports and protocols:

            i. To be used by the master nodes:

            ii. To be used by the worker nodes:

            • Set up a unique hostname on each machine using the following command:

              echo \"<node_internal_IP> <host_name>\" >> /etc/hosts\nhostnamectl set-hostname <host_name>\n

              For example:

              echo \"192.168.0.167 loadbalancer\" >> /etc/hosts\nhostnamectl set-hostname loadbalancer\n
            "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#steps","title":"Steps","text":"
            1. Prepare the Loadbalancer node to communicate with the two master nodes' apiservers on their IPs via port 6443.

            2. Do following in all the nodes except the Loadbalancer node:

              • Disable swap.

              • Install kubelet and kubeadm.

              • Install container runtime - you will be using containerd.

            3. Initiate kubeadm control plane configuration on one of the master nodes.

            4. Save the new master and worker node join commands with the token.

            5. Join the second master node to the control plane using the join command.

            6. Join the worker nodes to the control plane using the join command.

            7. Configure kubeconfig($HOME/.kube/config) on loadbalancer node.

            8. Install kubectl on Loadbalancer node.

            9. Install CNI network plugin i.e. Flannel on Loadbalancer node.

            10. Validate all cluster components and nodes are visible on Loadbalancer node.

            11. Deploy a sample app and validate the app from Loadbalancer node.

            "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#setting-up-loadbalancer","title":"Setting up loadbalancer","text":"

            You will use HAPROXY as the primary loadbalancer, but you can use other options as well. This node will not be part of the K8s cluster; it will sit outside of the cluster and interact with the cluster using ports.

            You have 2 master nodes, which means the user can connect to either of the 2 apiservers. The loadbalancer will be used to load balance between the 2 apiservers.

            • Login to the loadbalancer node

            • Switch to the root user - sudo su

            • Update your repository and your system

              sudo apt-get update && sudo apt-get upgrade -y\n
            • Install haproxy

              sudo apt-get install haproxy -y\n
            • Edit haproxy configuration

              vi /etc/haproxy/haproxy.cfg\n

              Add the below lines to create a frontend configuration for loadbalancer -

              frontend fe-apiserver\nbind 0.0.0.0:6443\nmode tcp\noption tcplog\ndefault_backend be-apiserver\n

              Add the below lines to create a backend configuration for master1 and master2 nodes at port 6443.

              Note

              6443 is the default port of kube-apiserver

              backend be-apiserver\nmode tcp\noption tcplog\noption tcp-check\nbalance roundrobin\ndefault-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100\n\n    server master1 10.138.0.15:6443 check\n    server master2 10.138.0.16:6443 check\n

              Here - master1 and master2 are the hostnames of the master nodes and 10.138.0.15 and 10.138.0.16 are the corresponding internal IP addresses.

            • Ensure haproxy config file is correctly formatted:

              haproxy -c -q -V -f /etc/haproxy/haproxy.cfg\n
            • Restart and Verify haproxy

              systemctl restart haproxy\nsystemctl status haproxy\n

              Ensure haproxy is in running status.

              Run nc command as below:

              nc -v localhost 6443\nConnection to localhost 6443 port [tcp/*] succeeded!\n

              Note

              If you see failures for master1 and master2 connectivity, you can ignore them for time being as you have not yet installed anything on the servers.

            "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#install-kubeadm-kubelet-and-containerd-on-master-and-worker-nodes","title":"Install kubeadm, kubelet and containerd on master and worker nodes","text":"

            kubeadm will not install or manage kubelet or kubectl for you, so you will need to ensure they match the version of the Kubernetes control plane you want kubeadm to install for you. You will install these packages on all of your machines:

            \u2022 kubeadm: the command to bootstrap the cluster.

            \u2022 kubelet: the component that runs on all of the machines in your cluster and does things like starting pods and containers.

            \u2022 kubectl: the command line util to talk to your cluster.

            In this step, you will install kubelet and kubeadm on the below nodes

            • master1

            • master2

            • worker1

            • worker2

            The below steps will be performed on all the above mentioned nodes:

            • SSH into all the 4 machines

            • Update the repositories and packages:

              sudo apt-get update && sudo apt-get upgrade -y\n
            • Turn off swap

              swapoff -a\nsudo sed -i '/ swap / s/^/#/' /etc/fstab\n
            • Install curl and apt-transport-https

              sudo apt-get update && sudo apt-get install -y apt-transport-https curl\n
            • Download the Google Cloud public signing key and add key to verify releases

              curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -\n
            • add kubernetes apt repo

              cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list\ndeb https://apt.kubernetes.io/ kubernetes-xenial main\nEOF\n
            • Install kubelet and kubeadm

              sudo apt-get update\nsudo apt-get install -y kubelet kubeadm\n
            • apt-mark hold is used so that these packages will not be updated/removed automatically

              sudo apt-mark hold kubelet kubeadm\n
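
            As an optional sanity check, you can confirm that the tools are installed and that the packages are held:

            kubeadm version -o short\nkubelet --version\napt-mark showhold\n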
            "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#install-the-container-runtime-ie-containerd-on-master-and-worker-nodes","title":"Install the container runtime i.e. containerd on master and worker nodes","text":"

            To run containers in Pods, Kubernetes uses a container runtime.

            By default, Kubernetes uses the Container Runtime Interface (CRI) to interface with your chosen container runtime.

            • Install container runtime - containerd

              The first thing to do is configure the persistent loading of the necessary containerd modules. This forwarding of IPv4 and letting iptables see bridged traffic is done with the following command:

              cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf\noverlay\nbr_netfilter\nEOF\n\nsudo modprobe overlay\nsudo modprobe br_netfilter\n
            • Ensure net.bridge.bridge-nf-call-iptables is set to 1 in your sysctl config:

              # sysctl params required by setup, params persist across reboots\ncat <<EOF | sudo tee /etc/sysctl.d/k8s.conf\nnet.bridge.bridge-nf-call-iptables  = 1\nnet.bridge.bridge-nf-call-ip6tables = 1\nnet.ipv4.ip_forward                 = 1\nEOF\n
            • Apply sysctl params without reboot:

              sudo sysctl --system\n
            • Install the necessary dependencies with:

              sudo apt install -y curl gnupg2 software-properties-common apt-transport-https ca-certificates\n
            • The containerd.io packages in DEB and RPM formats are distributed by Docker. Add the required GPG key with:

              curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -\nsudo add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"\n

              It's now time to install and configure containerd:

              sudo apt update -y\nsudo apt install -y containerd.io\ncontainerd config default | sudo tee /etc/containerd/config.toml\n\n# Reload the systemd daemon with\nsudo systemctl daemon-reload\n\n# Start containerd\nsudo systemctl restart containerd\nsudo systemctl enable --now containerd\n

              You can verify containerd is running with the command:

              sudo systemctl status containerd\n
            "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#configure-kubeadm-to-bootstrap-the-cluster","title":"Configure kubeadm to bootstrap the cluster","text":"

            You will start off by initializing only one master node. For this purpose, you choose master1 to initialize the first control plane, but you could also do the same on master2.

            • SSH into master1 machine

            • Switch to root user: sudo su

              Configuring the kubelet cgroup driver

              From 1.22 onwards, if you do not set the cgroupDriver field under KubeletConfiguration, kubeadm will default it to systemd. So you do not need to do anything here by default, but if you want to change it you can refer to this documentation.
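
              If you do want to set the driver explicitly, a minimal sketch of the KubeletConfiguration section (placed in a kubeadm config file whose name here is arbitrary, and passed to kubeadm init --config) looks like this:

              # kubeadm-config.yaml (illustrative)\napiVersion: kubelet.config.k8s.io/v1beta1\nkind: KubeletConfiguration\ncgroupDriver: systemd\n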

            • Execute the below command to initialize the cluster:

              kubeadm config images pull\nkubeadm init --control-plane-endpoint \\\n\"LOAD_BALANCER_IP_OR_HOSTNAME:LOAD_BALANCER_PORT\" --upload-certs --pod-network-cidr=10.244.0.0/16\n

              Here, you can use either the IP address or the hostname of the loadbalancer in place of LOAD_BALANCER_IP_OR_HOSTNAME. The hostname of the server, i.e. loadbalancer, is not resolvable from the master1 node, so instead of using hostnames that are not resolvable across your network, you will be using the IP address of the Loadbalancer server.

              The LOAD_BALANCER_PORT is the frontend configuration port defined in the HAPROXY configuration. For this, you have kept the port as 6443, which is the default apiserver port.

              Important Note

              The --pod-network-cidr value depends on which CNI plugin you are going to use, so you need to be very careful while setting this CIDR value. In our case, you are going to use the Flannel CNI network plugin, so you will use --pod-network-cidr=10.244.0.0/16. If you opt to use the Calico CNI network plugin, you need to use --pod-network-cidr=192.168.0.0/16, and if you opt to use Weave Net, there is no need to pass this parameter.

              For example, our Flannel CNI network plugin based kubeadm init command, with the loadbalancer node's internal IP of 192.168.0.167, looks like below:

              kubeadm config images pull\nkubeadm init --control-plane-endpoint \"192.168.0.167:6443\" --upload-certs --pod-network-cidr=10.244.0.0/16\n

              Save the output in a secure file for future use. It will show a unique token to join the control plane. The output from kubeadm init should look like below:

              Your Kubernetes control-plane has initialized successfully!\n\nTo start using your cluster, you need to run the following as a regular user:\n\nmkdir -p $HOME/.kube\nsudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\nsudo chown $(id -u):$(id -g) $HOME/.kube/config\n\nAlternatively, if you are the root user, you can run:\n\nexport KUBECONFIG=/etc/kubernetes/admin.conf\n\nYou should now deploy a pod network to the cluster.\nRun \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\nhttps://kubernetes.io/docs/concepts/cluster-administration/addons/\n\nYou can now join any number of the control-plane node running the following\ncommand on each worker nodes as root:\n\nkubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \\\n    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee3\n    7ab9834567333b939458a5bfb5 \\\n    --control-plane --certificate-key 824d9a0e173a810416b4bca7038fb33b616108c17abcbc5eaef8651f11e3d146\n\nPlease note that the certificate-key gives access to cluster sensitive data, keep\nit secret!\nAs a safeguard, uploaded-certs will be deleted in two hours; If necessary, you\ncan use \"kubeadm init phase upload-certs --upload-certs\" to reload certs afterward.\n\nThen you can join any number of worker nodes by running the following on each as\nroot:\n\nkubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \\\n    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee37ab9834567333b939458a5bfb5\n

              The output consists of 3 major tasks:

A. Set up kubeconfig on the current master node. As you are running as the root user, you need to run the following command:

              export KUBECONFIG=/etc/kubernetes/admin.conf\n

To use kubectl from the terminal as a normal user, run the below commands instead:

              mkdir -p $HOME/.kube\nsudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\nsudo chown $(id -u):$(id -g) $HOME/.kube/config\n

              Now the machine is initialized as master.

              Warning

              Kubeadm signs the certificate in the admin.conf to have Subject: O = system:masters, CN = kubernetes-admin. system:masters is a break-glass, super user group that bypasses the authorization layer (e.g. RBAC). Do not share the admin.conf file with anyone and instead grant users custom permissions by generating them a kubeconfig file using the kubeadm kubeconfig user command.
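As a minimal sketch of that approach (the user name alice and the view role binding are illustrative assumptions, not part of this guide), you could generate a restricted kubeconfig and bind only the permissions it needs:

# Generate a kubeconfig for a hypothetical user \"alice\" and grant read-only access in the default namespace\nkubeadm kubeconfig user --client-name=alice > alice.conf\nkubectl create rolebinding alice-view --clusterrole=view --user=alice --namespace=default\n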

B. Set up a new control plane (master), i.e. master2, by running the following command on the master2 node:

kubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \\\n    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee37ab9834567333b939458a5bfb5 \\\n    --control-plane --certificate-key 824d9a0e173a810416b4bca7038fb33b616108c17abcbc5eaef8651f11e3d146\n

C. Join the worker nodes by running the following command on each individual worker node:

              kubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \\\n    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee37ab9834567333b939458a5bfb5\n

              Important Note

Your output will be different from what is provided here. While performing the rest of the demo, ensure that you are executing the command provided by your own output and don't copy and paste from here.

              If you do not have the token, you can get it by running the following command on the control-plane node:

              kubeadm token list\n

              The output is similar to this:

              TOKEN     TTL  EXPIRES      USAGES           DESCRIPTION            EXTRA GROUPS\n8ewj1p... 23h  2018-06-12   authentication,  The default bootstrap  system:\n                            signing          token generated by     bootstrappers:\n                                            'kubeadm init'.         kubeadm:\n                                                                    default-node-token\n

If you missed the join command, execute kubeadm token create --print-join-command on the master node to regenerate the join command with a new token.

              root@master:~$ kubeadm token create --print-join-command\n\nkubeadm join 10.2.0.4:6443 --token xyzeyi.wxer3eg9vj8hcpp2 \\\n--discovery-token-ca-cert-hash sha256:ccfc92b2a31b002c3151cdbab77ff4dc32ef13b213fa3a9876e126831c76f7fa\n

              By default, tokens expire after 24 hours. If you are joining a node to the cluster after the current token has expired, you can create a new token by running the following command on the control-plane node:

              kubeadm token create\n

              The output is similar to this: 5didvk.d09sbcov8ph2amjw

              We can use this new token to join:

              kubeadm join <master-ip>:<master-port> --token <token> \\\n    --discovery-token-ca-cert-hash sha256:<hash>\n
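If you also need the value for <hash>, it can be recomputed from the cluster CA certificate on the control-plane node (a standard approach, assuming the default kubeadm certificate path):

openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'\n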
              • SSH into master2

• Switch to root user: sudo su

              • Check the command provided by the output of master1:

                You can now use the below command to add another control-plane node(master) to the control plane:

kubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \\\n    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee37ab9834567333b939458a5bfb5 \\\n    --control-plane --certificate-key 824d9a0e173a810416b4bca7038fb33b616108c17abcbc5eaef8651f11e3d146\n
              • Execute the kubeadm join command for control plane on master2

                Your output should look like:

                This node has joined the cluster and a new control plane instance was created:\n\n* Certificate signing request was sent to apiserver and approval was received.\n* The Kubelet was informed of the new secure connection details.\n* Control plane (master) label and taint were applied to the new node.\n* The Kubernetes control plane instances scaled up.\n* A new etcd member was added to the local/stacked etcd cluster.\n

Now that you have initialized both masters, you can work on bootstrapping the worker nodes.

              • SSH into worker1 and worker2

              • Switch to root user on both the machines: sudo su

              • Check the output given by the init command on master1 to join worker node:

                kubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \\\n    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee37ab9834567333b939458a5bfb5\n
              • Execute the above command on both the nodes:

              • Your output should look like:

                This node has joined the cluster:\n* Certificate signing request was sent to apiserver and a response was received.\n* The Kubelet was informed of the new secure connection details.\n
              "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#configure-kubeconfig-on-loadbalancer-node","title":"Configure kubeconfig on loadbalancer node","text":"

Now that you have configured the master and the worker nodes, it's time to configure kubeconfig (.kube) on the loadbalancer node. It is completely up to you whether you want to use the loadbalancer node to host kubeconfig; it can also be set up on a separate external machine that has access to the loadbalancer node. For the purpose of this demo you will use the loadbalancer node to host kubeconfig and kubectl.

              • SSH into loadbalancer node

              • Switch to root user: sudo su

• Create a .kube directory in $HOME of the root user:

                mkdir -p $HOME/.kube\n
              • SCP configuration file from any one master node to loadbalancer node

                scp master1:/etc/kubernetes/admin.conf $HOME/.kube/config\n

                Important Note

If you haven't set up an SSH connection between the master node and the loadbalancer, you can manually copy the contents of the file /etc/kubernetes/admin.conf from the master1 node and paste them into the $HOME/.kube/config file on the loadbalancer node. Ensure that the kubeconfig file path is $HOME/.kube/config on the loadbalancer node.

              • Provide appropriate ownership to the copied file

                chown $(id -u):$(id -g) $HOME/.kube/config\n
              "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#install-kubectl","title":"Install kubectl","text":"
              • Install kubectl binary

                kubectl: the command line util to talk to your cluster.

                snap install kubectl --classic\n

                This outputs:

                kubectl 1.26.1 from Canonical\u2713 installed\n
              • Verify the cluster

                kubectl get nodes\n\nNAME STATUS ROLES AGE VERSION\nmaster1 NotReady control-plane,master 21m v1.26.1\nmaster2 NotReady control-plane,master 15m v1.26.1\nworker1 Ready <none> 9m17s v1.26.1\nworker2 Ready <none> 9m25s v1.26.1\n
              "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#install-cni-network-plugin","title":"Install CNI network plugin","text":""},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#cni-overview","title":"CNI overview","text":"

              Managing a network where containers can interoperate efficiently is very important. Kubernetes has adopted the Container Network Interface(CNI) specification for managing network resources on a cluster. This relatively simple specification makes it easy for Kubernetes to interact with a wide range of CNI-based software solutions. Using this CNI plugin allows Kubernetes pods to have the same IP address inside the pod as they do on the VPC network. Make sure the configuration corresponds to the Pod CIDR specified in the kubeadm configuration file if applicable.

              You must deploy a CNI based Pod network add-on so that your Pods can communicate with each other. Cluster DNS (CoreDNS) will not start up before a network is installed. To verify you can run this command: kubectl get po -n kube-system:

You should see output like the following. The two coredns-* pods will be in a Pending state; this is the expected behavior. Once we install the network plugin, they will move to a Running state.

              Output Example:

              root@loadbalancer:~$ kubectl get po -n kube-system\n NAME                               READY  STATUS   RESTARTS  AGE\ncoredns-558bd4d5db-5jktc             0/1   Pending   0        10m\ncoredns-558bd4d5db-xdc5x             0/1   Pending   0        10m\netcd-master1                         1/1   Running   0        11m\nkube-apiserver-master1               1/1   Running   0        11m\nkube-controller-manager-master1      1/1   Running   0        11m\nkube-proxy-5jfh5                     1/1   Running   0        10m\nkube-scheduler-master1               1/1   Running   0        11m\n
              "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#supported-cni-options","title":"Supported CNI options","text":"

              To read more about the currently supported base CNI solutions for Kubernetes read here and also read this.

              The below command can be run on the Loadbalancer node to install the CNI plugin:

              kubectl apply -f https://github.com/coreos/flannel/raw/master/Documentation/kube-flannel.yml\n

As you passed --pod-network-cidr=10.244.0.0/16 with kubeadm init, this will work for the Flannel CNI.

              Using Other CNI Options

              For Calico CNI plugin to work correctly, you need to pass --pod-network-cidr=192.168.0.0/16 with kubeadm init and then you can run: kubectl apply -f https://docs.projectcalico.org/v3.8/manifests/calico.yaml

              For Weave Net CNI plugin to work correctly, you don't need to pass --pod-network-cidr with kubeadm init and then you can run: kubectl apply -f \"https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\\n')\"

              Dual Network:

It is highly recommended to follow an internal/external network layout for your cluster.

To enable this, just give two different names to the internal and external interfaces, according to the naming scheme of your distro of choice:

              external_interface: eth0\ninternal_interface: eth1\n

You can also decide here which CIDRs your cluster should use:

              cluster_cidr: 10.43.0.0/16\nservice_cidr: 10.44.0.0/16\n

Once you have successfully installed the Flannel CNI component on your cluster, you can verify your HA cluster by running:

              kubectl get nodes\n\nNAME      STATUS   ROLES                    AGE   VERSION\nmaster1   Ready    control-plane,master     22m   v1.26.1\nmaster2   Ready    control-plane,master     17m   v1.26.1\nworker1   Ready    <none>                   10m   v1.26.1\nworker2   Ready    <none>                   10m   v1.26.1\n
              "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#deploy-a-sample-nginx-application-from-one-of-the-master-nodes","title":"Deploy A Sample Nginx Application From one of the master nodes","text":"

              Now that we have all the components to make the cluster and applications work, let\u2019s deploy a sample Nginx application and see if we can access it over a NodePort that has port range of 30000-32767.

The below commands can be run on one of the master nodes:

              kubectl run nginx --image=nginx --port=80\nkubectl expose pod nginx --port=80 --type=NodePort\n

To check which NodePort has been opened for Nginx, run:

              kubectl get svc\n

The output will show the nginx service along with the NodePort allocated to it.
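If you prefer to grab just the port number, a jsonpath query works as well (this assumes the Service is named nginx, as created by the expose command above):

kubectl get svc nginx -o jsonpath='{.spec.ports[0].nodePort}'\n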

              Once the deployment is up, you should be able to access the Nginx home page on the allocated NodePort from either of the worker nodes' Floating IP.

To check which worker node is serving nginx, you can check the NODE column by running the following command:

              kubectl get pods --all-namespaces --output wide\n

              OR,

              kubectl get pods -A -o wide\n

This will show which node the nginx pod is running on.

Go to your browser and visit http://<Worker-Floating-IP>:<NodePort>, e.g. http://128.31.25.246:32713, to check the default nginx page. Here <Worker-Floating-IP> corresponds to the Floating IP of the worker node running the nginx pod, i.e. worker2.


              "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#deploy-a-k8s-dashboard","title":"Deploy A K8s Dashboard","text":"

You are going to set up K8dash/Skooner to view a dashboard that shows all your K8s cluster components.

              • SSH into loadbalancer node

              • Switch to root user: sudo su

              • Apply available deployment by running the following command:

                kubectl apply -f https://raw.githubusercontent.com/skooner-k8s/skooner/master/kubernetes-skooner-nodeport.yaml\n

                This will map Skooner port 4654 to a randomly selected port on the running node.

                The assigned NodePort can be found running:

                kubectl get svc --namespace=kube-system\n

                OR,

                kubectl get po,svc -n kube-system\n

To check which worker node is serving skooner-*, you can check the NODE column by running the following command:

                kubectl get pods --all-namespaces --output wide\n

                OR,

                kubectl get pods -A -o wide\n

This will show which node the skooner pod is running on.

Go to your browser and visit http://<Worker-Floating-IP>:<NodePort>, e.g. http://128.31.25.246:30495, to check the Skooner dashboard page. Here <Worker-Floating-IP> corresponds to the Floating IP of the worker node running the skooner-* pod, i.e. worker2.

              Setup the Service Account Token to access the Skooner Dashboard:

              The first (and easiest) option is to create a dedicated service account. Run the following commands:

              • Create the service account in the current namespace (we assume default)

                kubectl create serviceaccount skooner-sa\n
              • Give that service account root on the cluster

                kubectl create clusterrolebinding skooner-sa --clusterrole=cluster-admin --serviceaccount=default:skooner-sa\n
• Create a secret to hold the token for the SA:

                kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n    name: skooner-sa-token\n    annotations:\n        kubernetes.io/service-account.name: skooner-sa\ntype: kubernetes.io/service-account-token\nEOF\n

                Information

Since 1.22, this type of Secret is no longer used to mount credentials into Pods, and obtaining tokens via the TokenRequest API is recommended instead of using service account token Secret objects. Tokens obtained from the TokenRequest API are more secure than ones stored in Secret objects, because they have a bounded lifetime and are not readable by other API clients. You can use the kubectl create token command to obtain a token from the TokenRequest API. For example: kubectl create token skooner-sa, where skooner-sa is the service account name.

              • Find the secret that was created to hold the token for the SA

                kubectl get secrets\n
              • Show the contents of the secret to extract the token

                kubectl describe secret skooner-sa-token\n

              Copy the token value from the secret detail and enter it into the login screen to access the dashboard.
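Alternatively, instead of reading the token out of the kubectl describe output, you can print the decoded token directly (assuming the secret name skooner-sa-token used above):

kubectl get secret skooner-sa-token -o jsonpath='{.data.token}' | base64 --decode\n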

              "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#watch-demo-video-showing-how-to-setup-the-cluster","title":"Watch Demo Video showing how to setup the cluster","text":"

Here\u2019s a recorded demo video on how to set up an HA K8s cluster using kubeadm, as explained above.

              "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#very-important-certificates-renewal","title":"Very Important: Certificates Renewal","text":"

              Client certificates generated by kubeadm expire after one year unless the Kubernetes version is upgraded or the certificates are manually renewed.

              To renew certificates manually, you can use the kubeadm certs renew command with the appropriate command line options. After running the command, you should restart the control plane Pods.
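Before renewing, you can check how much validity is left on each certificate with the kubeadm certs check-expiration command:

kubeadm certs check-expiration\n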

              kubeadm certs renew can renew any specific certificate or, with the subcommand all, it can renew all of them, as shown below:

              kubeadm certs renew all\n

Once the certificates are renewed, you must restart the kube-apiserver, kube-controller-manager, kube-scheduler and etcd so that they can use the new certificates, by running:

              systemctl restart kubelet\n

              Then, update the new kube config file:

              export KUBECONFIG=/etc/kubernetes/admin.conf\nsudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n

              Don't Forget to Update the older kube config file

              Update wherever you are using the older kube config to connect with the cluster.

              "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#clean-up","title":"Clean Up","text":"
              • To view the Cluster info:

                kubectl cluster-info\n
              • To delete your local references to the cluster:

                kubectl config delete-cluster\n
              "},{"location":"other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/#how-to-remove-the-node","title":"How to Remove the node?","text":"

              Talking to the control-plane node with the appropriate credentials, run:

              kubectl drain <node name> --delete-emptydir-data --force --ignore-daemonsets\n
              • Before removing the node, reset the state installed by kubeadm:

                kubeadm reset\n

                The reset process does not reset or clean up iptables rules or IPVS tables. If you wish to reset iptables, you must do so manually:

                iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X\n

                If you want to reset the IPVS tables, you must run the following command:

                ipvsadm -C\n
              • Now remove the node:

                kubectl delete node <node name>\n

              If you wish to start over, run kubeadm init or kubeadm join with the appropriate arguments.

              "},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/","title":"Creating a Single Master cluster with kubeadm","text":""},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#objectives","title":"Objectives","text":"
              • Install a single control-plane(master) Kubernetes cluster

              • Install a Pod network on the cluster so that your Pods can talk to each other

              • Deploy and test a sample app

              • Deploy K8s Dashboard to view all cluster's components

              "},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#components-and-architecure","title":"Components and architecure","text":"

You can learn about each component from Kubernetes Components.

              "},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#pre-requisite","title":"Pre-requisite","text":"

We will need 1 control-plane (master) and 2 worker nodes to create a single control-plane Kubernetes cluster using kubeadm. We are using the following settings for this purpose:

              • 1 Linux machine for master, ubuntu-20.04-x86_64, cpu-su.2 flavor with 2vCPU, 8GB RAM, 20GB storage.

              • 2 Linux machines for worker, ubuntu-20.04-x86_64, cpu-su.1 flavor with 1vCPU, 4GB RAM, 20GB storage - also assign Floating IPs to both of the worker nodes.

              • ssh access to all machines: Read more here on how to set up SSH on your remote VMs.

• Create 2 security groups with appropriate ports and protocols (a typical baseline is sketched after this list):

              i. To be used by the master nodes:

              ii. To be used by the worker nodes:
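As a typical baseline (an assumption based on the standard kubeadm port requirements - adjust to your environment), the master security group usually needs inbound TCP 6443 (Kubernetes API server), 2379-2380 (etcd), 10250 (kubelet API), 10257 (kube-controller-manager) and 10259 (kube-scheduler), while the worker security group usually needs inbound TCP 10250 (kubelet API), TCP 30000-32767 (NodePort Services) and, if you use Flannel, UDP 8472 (VXLAN overlay).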

• Set up a unique hostname on each machine using the following command:

                echo \"<node_internal_IP> <host_name>\" >> /etc/hosts\nhostnamectl set-hostname <host_name>\n

                For example:

                echo \"192.168.0.167 master\" >> /etc/hosts\nhostnamectl set-hostname master\n
              "},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#steps","title":"Steps","text":"
              1. Disable swap on all nodes.

              2. Install kubeadm, kubelet, and kubectl on all the nodes.

3. Install a container runtime on all nodes - you will be using containerd.

              4. Initiate kubeadm control plane configuration on the master node.

              5. Save the worker node join command with the token.

              6. Install CNI network plugin i.e. Flannel on master node.

              7. Join the worker node to the master node (control plane) using the join command.

              8. Validate all cluster components and nodes are visible on master node.

              9. Deploy a sample app and validate the app from master node.

              "},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#install-kubeadm-kubelet-and-containerd-on-master-and-worker-nodes","title":"Install kubeadm, kubelet and containerd on master and worker nodes","text":"

              kubeadm will not install or manage kubelet or kubectl for you, so you will need to ensure they match the version of the Kubernetes control plane you want kubeadm to install for you. You will install these packages on all of your machines:

              \u2022 kubeadm: the command to bootstrap the cluster.

              \u2022 kubelet: the component that runs on all of the machines in your cluster and does things like starting pods and containers.

              \u2022 kubectl: the command line util to talk to your cluster.

In this step, you will install kubelet, kubeadm, and kubectl on the below nodes:

              • master

              • worker1

              • worker2

The below steps will be performed on all the above-mentioned nodes:

              • SSH into all the 3 machines

              • Update the repositories and packages:

                sudo apt-get update && sudo apt-get upgrade -y\n
              • Turn off swap

                swapoff -a\nsudo sed -i '/ swap / s/^/#/' /etc/fstab\n
              • Install curl and apt-transport-https

                sudo apt-get update && sudo apt-get install -y apt-transport-https curl\n
              • Download the Google Cloud public signing key and add key to verify releases

                curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -\n
              • add kubernetes apt repo

                cat <<EOF | sudo tee /etc/apt/sources.list.d/kubernetes.list\ndeb https://apt.kubernetes.io/ kubernetes-xenial main\nEOF\n
              • Install kubelet, kubeadm, and kubectl

                sudo apt-get update\nsudo apt-get install -y kubelet kubeadm kubectl\n
              • apt-mark hold is used so that these packages will not be updated/removed automatically

                sudo apt-mark hold kubelet kubeadm kubectl\n
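Optionally, you can confirm that the tools are installed and report matching versions (a quick sanity check, not part of the original steps):

kubeadm version -o short\nkubelet --version\nkubectl version --client\n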
              "},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#install-the-container-runtime-ie-containerd-on-master-and-worker-nodes","title":"Install the container runtime i.e. containerd on master and worker nodes","text":"

              To run containers in Pods, Kubernetes uses a container runtime.

              By default, Kubernetes uses the Container Runtime Interface (CRI) to interface with your chosen container runtime.

              • Install container runtime - containerd

The first thing to do is configure the persistent loading of the necessary containerd modules. Forwarding IPv4 and letting iptables see bridged traffic is enabled with the following commands:

                cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf\noverlay\nbr_netfilter\nEOF\n\nsudo modprobe overlay\nsudo modprobe br_netfilter\n
              • Ensure net.bridge.bridge-nf-call-iptables is set to 1 in your sysctl config:

                # sysctl params required by setup, params persist across reboots\ncat <<EOF | sudo tee /etc/sysctl.d/k8s.conf\nnet.bridge.bridge-nf-call-iptables  = 1\nnet.bridge.bridge-nf-call-ip6tables = 1\nnet.ipv4.ip_forward                 = 1\nEOF\n
              • Apply sysctl params without reboot:

                sudo sysctl --system\n
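You can optionally verify that the modules are loaded and that the sysctl values took effect:

lsmod | grep -E 'overlay|br_netfilter'\nsysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward\n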
              • Install the necessary dependencies with:

                sudo apt install -y curl gnupg2 software-properties-common apt-transport-https ca-certificates\n
              • The containerd.io packages in DEB and RPM formats are distributed by Docker. Add the required GPG key with:

                curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -\nsudo add-apt-repository \"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable\"\n

It's now time to install and configure containerd:

                sudo apt update -y\nsudo apt install -y containerd.io\ncontainerd config default | sudo tee /etc/containerd/config.toml\n\n# Reload the systemd daemon with\nsudo systemctl daemon-reload\n\n# Start containerd\nsudo systemctl restart containerd\nsudo systemctl enable --now containerd\n

                You can verify containerd is running with the command:

                sudo systemctl status containerd\n

                Configuring the kubelet cgroup driver

From 1.22 onwards, if you do not set the cgroupDriver field under KubeletConfiguration, kubeadm defaults it to systemd. So you do not need to do anything here by default, but if you want to change it you can refer to this documentation.
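If pods later fail to start because of a cgroup driver mismatch, a commonly used adjustment (an assumption based on the default config.toml generated above, not an original step of this guide) is to switch containerd to the systemd cgroup driver that kubelet defaults to:

# Set SystemdCgroup = true in the runc options of /etc/containerd/config.toml\nsudo sed -i 's/SystemdCgroup = false/SystemdCgroup = true/' /etc/containerd/config.toml\nsudo systemctl restart containerd\n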

              "},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#configure-kubeadm-to-bootstrap-the-cluster-on-master-node","title":"Configure kubeadm to bootstrap the cluster on master node","text":"

Run the below commands on the master node, i.e. master, that you want to set up as the control plane.

              • SSH into master machine

              • Switch to root user: sudo su

              • Execute the below command to initialize the cluster:

                export MASTER_IP=<Master-Internal-IP>\nkubeadm config images pull\nkubeadm init --apiserver-advertise-address=${MASTER_IP} --pod-network-cidr=10.244.0.0/16\n

                Important Note

Please make sure you replace <Master-Internal-IP> with the correct internal IP of the master node. The --pod-network-cidr value depends on which CNI plugin you are going to use, so be very careful while setting this CIDR value. In our case, you are going to use the Flannel CNI network plugin, so you will use: --pod-network-cidr=10.244.0.0/16. If you opt to use the Calico CNI network plugin, you need to use: --pod-network-cidr=192.168.0.0/16, and if you opt to use Weave Net, you do not need to pass this parameter.

For example, with a master node whose internal IP is 192.168.0.167, our Flannel CNI network plugin based kubeadm init command looks like below:

                export MASTER_IP=192.168.0.167\nkubeadm config images pull\nkubeadm init --apiserver-advertise-address=${MASTER_IP} --pod-network-cidr=10.244.0.0/16\n

Save the output in a secure file for future use. It shows a unique token to join the control plane. The output from kubeadm init should look like below:

                Your Kubernetes control-plane has initialized successfully!\n\nTo start using your cluster, you need to run the following as a regular user:\n\nmkdir -p $HOME/.kube\nsudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\nsudo chown $(id -u):$(id -g) $HOME/.kube/config\n\nAlternatively, if you are the root user, you can run:\n\nexport KUBECONFIG=/etc/kubernetes/admin.conf\n\nYou should now deploy a pod network to the cluster.\nRun \"kubectl apply -f [podnetwork].yaml\" with one of the options listed at:\nhttps://kubernetes.io/docs/concepts/cluster-administration/addons/\n\nYou can now join any number of the control-plane node running the following\ncommand on each worker nodes as root:\n\nkubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \\\n    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee3\n    7ab9834567333b939458a5bfb5 \\\n    --control-plane --certificate-key 824d9a0e173a810416b4bca7038fb33b616108c17abcbc5eaef8651f11e3d146\n\nPlease note that the certificate-key gives access to cluster sensitive data, keep\nit secret!\nAs a safeguard, uploaded-certs will be deleted in two hours; If necessary, you\ncan use \"kubeadm init phase upload-certs --upload-certs\" to reload certs afterward.\n\nThen you can join any number of worker nodes by running the following on each as\nroot:\n\nkubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \\\n    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee37ab9834567333b939458a5bfb5\n

                The output consists of 2 major tasks:

A. Set up kubeconfig on the current master node. As you are running as the root user, you need to run the following command:

                export KUBECONFIG=/etc/kubernetes/admin.conf\n

To use kubectl from the terminal as a normal user, run the below commands instead:

                mkdir -p $HOME/.kube\nsudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\nsudo chown $(id -u):$(id -g) $HOME/.kube/config\n

                Now the machine is initialized as master.

                Warning

                Kubeadm signs the certificate in the admin.conf to have Subject: O = system:masters, CN = kubernetes-admin. system:masters is a break-glass, super user group that bypasses the authorization layer (e.g. RBAC). Do not share the admin.conf file with anyone and instead grant users custom permissions by generating them a kubeconfig file using the kubeadm kubeconfig user command.

B. Join the worker nodes by running the following command on each individual worker node:

                kubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \\\n    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee37ab9834567333b939458a5bfb5\n

                Important Note

Your output will be different from what is provided here. While performing the rest of the demo, ensure that you are executing the command provided by your own output and don't copy and paste from here.

                If you do not have the token, you can get it by running the following command on the control-plane node:

                kubeadm token list\n

                The output is similar to this:

                TOKEN     TTL  EXPIRES      USAGES           DESCRIPTION            EXTRA GROUPS\n8ewj1p... 23h  2018-06-12   authentication,  The default bootstrap  system:\n                            signing          token generated by     bootstrappers:\n                                            'kubeadm init'.         kubeadm:\n                                                                    default-node-token\n

If you missed the join command, execute kubeadm token create --print-join-command on the master node to regenerate the join command with a new token.

                root@master:~$ kubeadm token create --print-join-command\n\nkubeadm join 10.2.0.4:6443 --token xyzeyi.wxer3eg9vj8hcpp2 \\\n--discovery-token-ca-cert-hash sha256:ccfc92b2a31b002c3151cdbab77ff4dc32ef13b213fa3a9876e126831c76f7fa\n

                By default, tokens expire after 24 hours. If you are joining a node to the cluster after the current token has expired, you can create a new token by running the following command on the control-plane node:

                kubeadm token create\n

                The output is similar to this: 5didvk.d09sbcov8ph2amjw

                We can use this new token to join:

                kubeadm join <master-ip>:<master-port> --token <token> \\\n    --discovery-token-ca-cert-hash sha256:<hash>\n

Now that you have initialized the master, you can work on bootstrapping the worker nodes.

              • SSH into worker1 and worker2

              • Switch to root user on both the machines: sudo su

              • Check the output given by the init command on master to join worker node:

                kubeadm join 192.168.0.167:6443 --token cnslau.kd5fjt96jeuzymzb \\\n    --discovery-token-ca-cert-hash sha256:871ab3f050bc9790c977daee9e44cf52e15ee37ab9834567333b939458a5bfb5\n
              • Execute the above command on both the nodes:

              • Your output should look like:

                This node has joined the cluster:\n* Certificate signing request was sent to apiserver and a response was received.\n* The Kubelet was informed of the new secure connection details.\n
              "},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#validate-all-cluster-components-and-nodes-are-visible-on-all-nodes","title":"Validate all cluster components and nodes are visible on all nodes","text":"
              • Verify the cluster

                kubectl get nodes\n\nNAME      STATUS        ROLES                  AGE     VERSION\nmaster    NotReady      control-plane,master   21m     v1.26.1\nworker1   Ready         <none>                 9m17s   v1.26.1\nworker2   Ready         <none>                 9m25s   v1.26.1\n
              "},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#install-cni-network-plugin","title":"Install CNI network plugin","text":""},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#cni-overview","title":"CNI overview","text":"

              Managing a network where containers can interoperate efficiently is very important. Kubernetes has adopted the Container Network Interface(CNI) specification for managing network resources on a cluster. This relatively simple specification makes it easy for Kubernetes to interact with a wide range of CNI-based software solutions. Using this CNI plugin allows Kubernetes pods to have the same IP address inside the pod as they do on the VPC network. Make sure the configuration corresponds to the Pod CIDR specified in the kubeadm configuration file if applicable.

              You must deploy a CNI based Pod network add-on so that your Pods can communicate with each other. Cluster DNS (CoreDNS) will not start up before a network is installed. To verify you can run this command: kubectl get po -n kube-system:

You should see output like the following. The two coredns-* pods will be in a Pending state; this is the expected behavior. Once we install the network plugin, they will move to a Running state.

              Output Example:

              root@master:~$ kubectl get po -n kube-system\n NAME                               READY  STATUS   RESTARTS  AGE\ncoredns-558bd4d5db-5jktc             0/1   Pending   0        10m\ncoredns-558bd4d5db-xdc5x             0/1   Pending   0        10m\netcd-master1                         1/1   Running   0        11m\nkube-apiserver-master1               1/1   Running   0        11m\nkube-controller-manager-master1      1/1   Running   0        11m\nkube-proxy-5jfh5                     1/1   Running   0        10m\nkube-scheduler-master1               1/1   Running   0        11m\n
              "},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#supported-cni-options","title":"Supported CNI options","text":"

              To read more about the currently supported base CNI solutions for Kubernetes read here and also read this.

              The below command can be run on the master node to install the CNI plugin:

              kubectl apply -f https://github.com/coreos/flannel/raw/master/Documentation/kube-flannel.yml\n

As you passed --pod-network-cidr=10.244.0.0/16 with kubeadm init, this will work for the Flannel CNI.

              Using Other CNI Options

              For Calico CNI plugin to work correctly, you need to pass --pod-network-cidr=192.168.0.0/16 with kubeadm init and then you can run: kubectl apply -f https://docs.projectcalico.org/v3.8/manifests/calico.yaml

              For Weave Net CNI plugin to work correctly, you don't need to pass --pod-network-cidr with kubeadm init and then you can run: kubectl apply -f \"https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\\n')\"

              Dual Network:

It is highly recommended to follow an internal/external network layout for your cluster.

To enable this, just give two different names to the internal and external interfaces, according to the naming scheme of your distro of choice:

              external_interface: eth0\ninternal_interface: eth1\n

You can also decide here which CIDRs your cluster should use:

              cluster_cidr: 10.43.0.0/16\nservice_cidr: 10.44.0.0/16\n

Once you have successfully installed the Flannel CNI component on your cluster, you can verify your cluster by running:

              kubectl get nodes\n\nNAME      STATUS   ROLES                    AGE   VERSION\nmaster    Ready    control-plane,master     22m   v1.26.1\nworker1   Ready    <none>                   10m   v1.26.1\nworker2   Ready    <none>                   10m   v1.26.1\n
              "},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#watch-recorded-video-showing-the-above-steps-on-setting-up-the-cluster","title":"Watch Recorded Video showing the above steps on setting up the cluster","text":"

Here\u2019s a quick recorded demo video up to this point, where we successfully set up a single-master K8s cluster using kubeadm.

              "},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#deploy-a-sample-nginx-application-from-the-master-node","title":"Deploy A Sample Nginx Application From the master node","text":"

              Now that we have all the components to make the cluster and applications work, let\u2019s deploy a sample Nginx application and see if we can access it over a NodePort that has port range of 30000-32767.

The below commands can be run on the master node:

              kubectl run nginx --image=nginx --port=80\nkubectl expose pod nginx --port=80 --type=NodePort\n

To check which NodePort has been opened for Nginx, run:

              kubectl get svc\n

The output will show the nginx service along with the NodePort allocated to it.

              Once the deployment is up, you should be able to access the Nginx home page on the allocated NodePort from either of the worker nodes' Floating IP.

To check which worker node is serving nginx, you can check the NODE column by running the following command:

              kubectl get pods --all-namespaces --output wide\n

              OR,

              kubectl get pods -A -o wide\n

This will show which node the nginx pod is running on.

Go to your browser and visit http://<Worker-Floating-IP>:<NodePort>, e.g. http://128.31.25.246:32713, to check the default nginx page. Here <Worker-Floating-IP> corresponds to the Floating IP of the worker node running the nginx pod, i.e. worker2.
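You can also verify this from the command line; for example, with the sample values above (your Floating IP and NodePort will differ):

curl -I http://128.31.25.246:32713\n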


              "},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#deploy-a-k8s-dashboard","title":"Deploy A K8s Dashboard","text":"

You are going to set up K8dash/Skooner to view a dashboard that shows all your K8s cluster components.

              • SSH into master node

              • Switch to root user: sudo su

              • Apply available deployment by running the following command:

                kubectl apply -f https://raw.githubusercontent.com/skooner-k8s/skooner/master/kubernetes-skooner-nodeport.yaml\n

This will map Skooner port 4654 to a randomly selected NodePort. The assigned NodePort can be found by running:

                kubectl get svc --namespace=kube-system\n

                OR,

                kubectl get po,svc -n kube-system\n
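If you just want the port number, a jsonpath query can be used as well (this assumes the Service created by the manifest above is named skooner, so verify the name in the kubectl get svc output first):

kubectl get svc skooner -n kube-system -o jsonpath='{.spec.ports[0].nodePort}'\n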

To check which worker node is serving skooner-*, you can check the NODE column by running the following command:

                kubectl get pods --all-namespaces --output wide\n

                OR,

                kubectl get pods -A -o wide\n

This will show which node the skooner pod is running on.

Go to your browser and visit http://<Worker-Floating-IP>:<NodePort>, e.g. http://128.31.25.246:30495, to check the Skooner dashboard page. Here <Worker-Floating-IP> corresponds to the Floating IP of the worker node running the skooner-* pod, i.e. worker2.

              Setup the Service Account Token to access the Skooner Dashboard:

              The first (and easiest) option is to create a dedicated service account. Run the following commands:

              • Create the service account in the current namespace (we assume default)

                kubectl create serviceaccount skooner-sa\n
              • Give that service account root on the cluster

                kubectl create clusterrolebinding skooner-sa --clusterrole=cluster-admin --serviceaccount=default:skooner-sa\n
• Create a secret to hold the token for the SA:

                kubectl apply -f - <<EOF\napiVersion: v1\nkind: Secret\nmetadata:\n    name: skooner-sa-token\n    annotations:\n        kubernetes.io/service-account.name: skooner-sa\ntype: kubernetes.io/service-account-token\nEOF\n

                Information

Since 1.22, this type of Secret is no longer used to mount credentials into Pods, and obtaining tokens via the TokenRequest API is recommended instead of using service account token Secret objects. Tokens obtained from the TokenRequest API are more secure than ones stored in Secret objects, because they have a bounded lifetime and are not readable by other API clients. You can use the kubectl create token command to obtain a token from the TokenRequest API. For example: kubectl create token skooner-sa, where skooner-sa is the service account name.

              • Find the secret that was created to hold the token for the SA

                kubectl get secrets\n
              • Show the contents of the secret to extract the token

                kubectl describe secret skooner-sa-token\n

              Copy the token value from the secret detail and enter it into the login screen to access the dashboard.

              "},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#watch-demo-video-showing-how-to-deploy-applications","title":"Watch Demo Video showing how to deploy applications","text":"

Here\u2019s a recorded demo video on how to deploy applications on top of the single-master K8s cluster set up as explained above.

              "},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#very-important-certificates-renewal","title":"Very Important: Certificates Renewal","text":"

              Client certificates generated by kubeadm expire after one year unless the Kubernetes version is upgraded or the certificates are manually renewed.

              To renew certificates manually, you can use the kubeadm certs renew command with the appropriate command line options. After running the command, you should restart the control plane Pods.

              kubeadm certs renew can renew any specific certificate or, with the subcommand all, it can renew all of them, as shown below:

              kubeadm certs renew all\n
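For example, to renew only the API server certificate instead of all of them:

kubeadm certs renew apiserver\n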

Once the certificates are renewed, you must restart the kube-apiserver, kube-controller-manager, kube-scheduler and etcd so that they can use the new certificates, by running:

              systemctl restart kubelet\n

              Then, update the new kube config file:

              export KUBECONFIG=/etc/kubernetes/admin.conf\nsudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config\n

              Don't Forget to Update the older kube config file

              Update wherever you are using the older kube config to connect with the cluster.

              "},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#clean-up","title":"Clean Up","text":"
              • To view the Cluster info:

                kubectl cluster-info\n
              • To delete your local references to the cluster:

                kubectl config delete-cluster\n
              "},{"location":"other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/#how-to-remove-the-node","title":"How to Remove the node?","text":"

              Talking to the control-plane node with the appropriate credentials, run:

              kubectl drain <node name> --delete-emptydir-data --force --ignore-daemonsets\n
              • Before removing the node, reset the state installed by kubeadm:

                kubeadm reset\n

                The reset process does not reset or clean up iptables rules or IPVS tables. If you wish to reset iptables, you must do so manually:

                iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X\n

                If you want to reset the IPVS tables, you must run the following command:

                ipvsadm -C\n
              • Now remove the node:

                kubectl delete node <node name>\n

              If you wish to start over, run kubeadm init or kubeadm join with the appropriate arguments.

              "}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml new file mode 100644 index 00000000..affc517c --- /dev/null +++ b/sitemap.xml @@ -0,0 +1,553 @@ + + + + https://nerc-project.github.io/nerc-docs/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/about/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/create-a-user-portal-account/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/user-onboarding-on-NERC/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/allocation/adding-a-new-allocation/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/allocation/adding-a-project/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/allocation/allocation-change-request/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/allocation/allocation-details/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/allocation/archiving-a-project/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/allocation/coldfront/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/allocation/manage-users-to-a-project/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/allocation/managing-users-to-an-allocation/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/allocation/project-and-allocation-review/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/allocation/requesting-an-allocation/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/best-practices/best-practices-for-bu/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/best-practices/best-practices-for-harvard/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/best-practices/best-practices-for-my-institution/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/best-practices/best-practices/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/cost-billing/billing-faqs/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/cost-billing/billing-process-for-bu/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/cost-billing/billing-process-for-harvard/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/cost-billing/billing-process-for-my-institution/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/cost-billing/how-pricing-works/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/get-started/cost-billing/nerc-pricing-calculator/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/migration-moc-to-nerc/Step1/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/migration-moc-to-nerc/Step2/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/migration-moc-to-nerc/Step3/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/migration-moc-to-nerc/Step4/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift/applications/creating-a-sample-application/ + 2024-11-15 + daily + + + 
https://nerc-project.github.io/nerc-docs/openshift/applications/creating-your-own-developer-catalog-service/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift/applications/deleting-applications/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift/applications/editing-applications/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift/applications/scaling-and-performance-guide/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift/decommission/decommission-openshift-resources/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift/get-started/openshift-overview/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift/logging-in/access-the-openshift-web-console/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift/logging-in/setup-the-openshift-cli/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift/logging-in/the-openshift-cli/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift/logging-in/web-console-overview/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift/storage/storage-overview/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift-ai/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift-ai/data-science-project/explore-the-jupyterlab-environment/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift-ai/data-science-project/model-serving-in-the-rhoai/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift-ai/data-science-project/testing-model-in-the-rhoai/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift-ai/data-science-project/using-projects-the-rhoai/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift-ai/get-started/rhoai-overview/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift-ai/logging-in/access-the-rhoai-dashboard/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift-ai/logging-in/the-rhoai-dashboard-overview/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift-ai/other-projects/configure-jupyter-notebook-use-gpus-aiml-modeling/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openshift-ai/other-projects/how-access-s3-data-then-download-and-analyze-it/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/access-and-security/create-a-key-pair/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/access-and-security/security-groups/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/advanced-openstack-topics/domain-name-system/domain-names-for-your-vms/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/advanced-openstack-topics/python-sdk/python-SDK/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/advanced-openstack-topics/setting-up-a-network/create-a-router/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/advanced-openstack-topics/setting-up-a-network/set-up-a-private-network/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/advanced-openstack-topics/setting-up-your-own-images/how-to-build-windows-image/ + 
2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/advanced-openstack-topics/terraform/terraform-on-NERC/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/backup/backup-with-snapshots/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/create-and-connect-to-the-VM/assign-a-floating-IP/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/create-and-connect-to-the-VM/create-a-Windows-VM/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/create-and-connect-to-the-VM/flavors/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/create-and-connect-to-the-VM/images/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/create-and-connect-to-the-VM/launch-a-VM/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/create-and-connect-to-the-VM/ssh-to-the-VM/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/create-and-connect-to-the-VM/bastion-host-based-ssh/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/openvpn_gui_for_windows/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/create-and-connect-to-the-VM/using-vpn/openvpn/tunnelblick_for_macos/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/create-and-connect-to-the-VM/using-vpn/sshuttle/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/create-and-connect-to-the-VM/using-vpn/wireguard/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/data-transfer/data-transfer-from-to-vm/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/decommission/decommission-openstack-resources/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/logging-in/access-the-openstack-dashboard/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/logging-in/dashboard-overview/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/management/vm-management/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/openstack-cli/launch-a-VM-using-openstack-CLI/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/openstack-cli/openstack-CLI/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/persistent-storage/attach-the-volume-to-an-instance/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/persistent-storage/create-an-empty-volume/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/persistent-storage/delete-volumes/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/persistent-storage/detach-a-volume/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/persistent-storage/extending-volume/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/persistent-storage/format-and-mount-the-volume/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/persistent-storage/mount-the-object-storage/ + 2024-11-15 + daily + + + https://nerc-project.github.io/nerc-docs/openstack/persistent-storage/object-storage/ + 
+<url><loc>https://nerc-project.github.io/nerc-docs/openstack/persistent-storage/transfer-a-volume/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/openstack/persistent-storage/volumes/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/CI-CD/CI-CD-pipeline/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/CI-CD/github-actions/setup-github-actions-pipeline/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/CI-CD/jenkins/integrate-your-GitHub-repository/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/CI-CD/jenkins/setup-jenkins-CI-CD-pipeline/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/apache-spark/spark/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/kubernetes/comparisons/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/kubernetes/k0s/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/kubernetes/kind/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/kubernetes/kubernetes/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/kubernetes/kubespray/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/kubernetes/microk8s/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/kubernetes/minikube/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/kubernetes/k3s/k3s-ha-cluster-using-k3d/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/kubernetes/k3s/k3s-ha-cluster/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/kubernetes/k3s/k3s-using-k3d/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/kubernetes/k3s/k3s-using-k3sup/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/kubernetes/k3s/k3s/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/kubernetes/kubeadm/HA-clusters-with-kubeadm/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+<url><loc>https://nerc-project.github.io/nerc-docs/other-tools/kubernetes/kubeadm/single-master-clusters-with-kubeadm/</loc><lastmod>2024-11-15</lastmod><changefreq>daily</changefreq></url>
+</urlset>
\ No newline at end of file
diff --git a/sitemap.xml.gz b/sitemap.xml.gz
new file mode 100644
index 00000000..bf38375b
Binary files /dev/null and b/sitemap.xml.gz differ
diff --git a/stylesheets/extra.css b/stylesheets/extra.css
new file mode 100644
index 00000000..1b0f3db0
--- /dev/null
+++ b/stylesheets/extra.css
@@ -0,0 +1,62 @@
+.md-logo img {
+    width: 68px !important;
+    height: 24px !important;
+}
+
+.md-footer {
+    background-color: rgb(228 228 228 / 87%);
+    color: var(--md-footer-fg-color);
+    padding: 25px 0px 25px 0px;
+}
+
+footer [class^="footer-"] {
+    width: 24%;
+    display: inline-block;
+    text-align: center;
+}
+
+footer .footer-left {
+    float: left;
+}
+
+footer .footer-right {
+    float: right;
+    /* padding-top: 25px; */
+}
+
+footer .footer-center {
+    padding-top: 25px;
+}
+
+.center {
+    position: absolute;
+    top: 50%;
+    transform: translate(0, -50%);
+    border: 5px solid #ffff00;
+    padding: 10px;
+}
+
+.parent {
+    /* background: #CCCCCC; */
+    /* height: 100px; */
+    height: 55px;
+    /* width: 200px; */
+    position: relative;
+}
+
+.maintain {
+    position: absolute;
+    top: 5%;
+    /* left: 50%; */
+    /* margin: -25px 0 0 -35px; */
+}
+
+.child {
+    /* background: #ffff00;
+    width: 70px;
+    height: 70px; */
+    position: absolute;
+    top: 50%;
+    left: 50%;
+    margin: -25px 0 0 -35px;
+}
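Note: the sitemap.xml added in this build lists every published page with a lastmod of 2024-11-15 and a changefreq of daily. A quick way to sanity-check the deployment once GitHub Pages serves it is to fetch the sitemap and confirm each listed page responds. The following is a minimal, illustrative Python sketch (standard library only); it is not part of the generated site, and it assumes the sitemap is served at the site root shown in the <loc> entries above.

#!/usr/bin/env python3
# Illustrative post-deployment link check -- not part of the diff above.
# Assumption: the sitemap is reachable at the site root used in the <loc> entries.
import urllib.error
import urllib.request
import xml.etree.ElementTree as ET

SITEMAP_URL = "https://nerc-project.github.io/nerc-docs/sitemap.xml"


def iter_locs(sitemap_xml: bytes):
    """Yield every <loc> value, tolerating the sitemap XML namespace prefix."""
    root = ET.fromstring(sitemap_xml)
    for elem in root.iter():
        if elem.tag.endswith("loc") and elem.text:
            yield elem.text.strip()


def main() -> None:
    # Download the generated sitemap, then request each listed page and
    # print its HTTP status so broken links stand out after a deploy.
    with urllib.request.urlopen(SITEMAP_URL, timeout=30) as resp:
        sitemap = resp.read()
    for loc in iter_locs(sitemap):
        try:
            with urllib.request.urlopen(loc, timeout=30) as page:
                status = page.status
        except urllib.error.HTTPError as err:
            status = err.code
        print(f"{status}  {loc}")


if __name__ == "__main__":
    main()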