Commit fd9c4779 authored by Mohamed, Fawzi Roberto (fawzi)
finished full rewrite (no test)

parent 6c5438ad
const stringify = require('json-stringify-safe');
const http = require('http');
const fs = require('fs');
module.exports = function (config, k8, k8component) {
const reloadMsg = `<html><head><title>Starting up!</title><meta http-equiv="refresh" content="${config.app.pageReloadTime}" ></head><body><h3>Please wait while we start a container for you!</h3>
<p>You might need to refresh manually (F5)...</body></html>`;
var ProxyRouter = function(options) {
if (!options.backend) {
throw "ProxyRouter backend required. Please provide options.backend parameter!";
}
const components = require('./components');
const yaml = require('js-yaml')
const k8 = require('./kubernetes')(config);
this.client = options.backend;
this.cache_ttl = (options.cache_ttl || 10) * 1000;
this.cache = {};
this.state_info = {};
//State is only for debug and not used at the moment
this.stateEnum = {
STARTING: 1,
STARTED: 2,
AVAILABLE: 3,
STOPPED: 4
};
if (Object.freeze)
Object.freeze(this.stateEnum);
console.log("ProxyRouter cache TTL is set to " + this.cache_ttl + " ms.");
};
ProxyRouter.prototype.kubernetesServiceLookup = function(req, res, userID, isWebsocket, path, next) { //Kubernetes service names are based
var self = this;
function createService(userID) {
console.log("trying to create service for the user: " + userID);
self.set_user_state(userID,self.stateEnum.STARTING);
k8.ns(config.k8component.namespace).service.get(config.k8component.imageType + '-svc-'+userID,function(err, result) {
if(err)
k8.ns(config.k8component.namespace).service.post({ body: k8component('service', userID)}, function(err, result){
if(err){
console.log(`#ERROR# Cannot start service ${config.k8component.imageType} for the user: ${userID}`);
console.log(stringify(err, null, 2));
}
else
createReplicationController(userID);
});
else
createReplicationController(userID);
});
};
const reloadMsg = `<html><head><title>Starting up!</title><meta http-equiv="refresh" content="${config.app.pageReloadTime}" ></head><body><h3>Please wait while we start a container for you!</h3><p>You might need to refresh manually (F5)...</body></html>`;
function guaranteeDir(path, next) {
fs.access(path, fs.constants.F_OK | fs.constants.R_OK, (err) => {
@@ -74,204 +35,91 @@ ProxyRouter.prototype.kubernetesServiceLookup = function(req, res, userID, isWeb
});
}
function createReplicationController(userID) {
console.log("trying to create replication controller for user: " + userID);
// createUserDir(userID); //Kubernetes can handle it, but the permissions can be problematic
k8.ns(config.k8component.namespace).replicationcontrollers.get(config.k8component.imageType + '-rc-' + userID, function(err, result) {
if(err)
k8.ns(config.k8component.namespace).replicationcontrollers.post({ body: k8component('replicationController', userID)}, function(err, result){
if(err){
console.log("#ERROR# Can't start replication controller for the user: " + userID);
console.log(stringify(err, null, 2));
}
else
getServicePort(userID);
});
else
getServicePort(userID);
});
};
function getServicePort(userID) {
console.log("trying to get service part for user: " + userID);
self.set_user_state(userID,self.stateEnum.STARTED);
k8.ns(config.k8component.namespace).service.get(config.k8component.imageType + '-svc-' + userID,function(err, result) {
/// functions that either gives the running pod or starts it
function getOrCreatePod(podName, next) {
k8.ns(config.k8component.namespace).pod.get(podName, function(err, result) {
if(err) {
console.log("#ERROR# Can't find service for the user: " + userID);
console.log(stringify(err, null, 2));
}
else{
var target= {host: config.k8Api.node, port: result.spec.ports[0].nodePort};
console.log(`Resolved using kubernetes to ${stringify(target)}`)
var writeTarget = stringify(target);
var cb = function(){
http.request({method:'HEAD',host:target.host,port:target.port,path: '/'}, (r) => {
if(r.statusCode >= 200 && r.statusCode < 400 ){
console.log("Forwarding to the target!")
self.set_user_state(userID,self.stateEnum.AVAILABLE);
self.set_user_last_success(userID, Date.now());
next(target);
}
}).setTimeout(1000).on('error', (err) => {
self.push_user_error(userID, err.message);
self.clear_user_state(userID);
console.log("Sending message back to the browser!")
if (!isWebsocket){
res.send(reloadMsg);
}
}).end();
}
self.client.hset(userID, path, writeTarget,cb);
}
});
};
function writeToDisk(userID){
fs.writeFile("service.json", stringify(k8component('service', userID), null, 2), function(err) {
const info = components.infoForPodName(podName)
components.templateForImageType(info.imageType, info.user, info.shortSession, {}, function(err, template, repl) {
if(err) {
return console.log(err);
}
console.log("The service file was saved!");
});
fs.writeFile("rc.json", stringify(k8component('replicationController', userID), null, 2), function(err) {
console.log(`#ERROR# Cannot start pod ${podName}, error in template generation: ${JSON.stringify(err)}`);
next(err, null)
} else {
//fs.writeFile("pod.yaml", template)
//console.log("wrote evaulated template to pod.yaml")
guaranteeUserDir(info.user, function (){
k8.ns(config.k8component.namespace).service.post({ body: yaml.safeLoad(template, 'utf8')}, function(err, res2){
if(err) {
return console.log(err);
console.log(`#ERROR# Cannot start pod ${podName}, error: ${JSON.stringify(err)}`);
next(err, null)
} else {
console.log(`Created pod ${podName}: ${JSON.stringify(res2)}`)
next(null, res2)
}
})
})
}
console.log("The rc file was saved!");
});
} else {
console.log(`looked up ${podName}: ${JSON.stringify(result)}`)
next(null, result)
}
// writeToDisk(userID);
guaranteeUserDir(userID, function() { createService(userID) });
};
// The decision could be made using the state machine instead of the
ProxyRouter.prototype.lookup = function(req, res, userID, isWebsocket, path, next) {
console.log("Looking up the path! " + req.path)
var self = this;
if ( self.cache[userID] && self.cache[userID][path]) {
var target = self.cache[userID][path];
console.log(`Resolved using local cache to ${stringify(target)}`)
next(target);
});
}
else {
if (config.specialUsers[userID]) {
target = config.specialUsers[userID]
// Set cache and expiration
if (self.cache[userID] === undefined){
self.cache[userID] = { path: target }
} else {
self.cache[userID][path] = target;
// cache pod name -> host & port
const resolveCache = require('../safe-memory-cache/map.js')({
limit: config.resolveCacheNMax,
maxTTL: config.resolveCacheTtlMaxMs,
refreshF: function(key, value, cache) {
}
console.log(`Forwarding special user ${userID} to ${stringify(target)}!`);
next(target);
})
function resolvePod(podName, next) {
var v = resolveCache.get(podName)
if (v === undefined) {
getOrCreatePod(podName, function (err, pod) {
if (err) {
next(err, null)
} else {
//Check if the path has been defined in the redis client otherwise get it from kubernetes
self.client.hget(userID, path, function(err, data) {
if (data) {
var target = JSON.parse(data);
console.log(`Resolved using redis cache to ${stringify(target)}`)
// Set cache and expiration
if (self.cache[userID] === undefined){
self.cache[userID] = { path: target }
const portInfo = pod.spec.containers[0].ports[0]
const res = {
host: portInfo.hostIP,
port: portInfo.containerPort
}
else{
self.cache[userID][path] = target;
console.log(`got ${JSON.stringify(res)} out of pod ${JSON.stringify(pod)}`)
resolveCache.set(podName, res)
next(null, res)
}
self.expire_route(userID, self.cache_ttl);
http.request({method:'HEAD',host:target.host,port:target.port,path: '/'}, (r) => {
if(r.statusCode >= 200 && r.statusCode < 500 ){
console.log("Forwarding to the target!");
self.set_user_state(userID,self.stateEnum.AVAILABLE);
next(target);
})
} else {
self.expire_route(userID, 0);
self.push_user_error(userID, `ERROR, statusCode: ${r.statusCode}, err: ${JSON.stringify(err)}`);
self.clear_user_state(userID);
self.client.hdel(userID, path, () =>{});
console.log(`ERROR, statusCode: ${r.statusCode}, err: ${JSON.stringify(err)}, Sending message back to the browser!`)
if (!isWebsocket) {
res.send(reloadMsg);
}
next(null, v)
}
}).setTimeout(1000).on('error', (err) => {
self.expire_route(userID, 0);
self.push_user_error(userID, err.message);
self.clear_user_state(userID);
self.client.hdel(userID, path, () =>{});
console.log("From error! Sending message back to the browser!")
if (!isWebsocket) {
res.send(reloadMsg);
}
}).end();
} else { //Else of path check in redis client
//Lookup target from Kubernetes
console.log(`Can't resolve using redis cache!!`)
self.kubernetesServiceLookup(req, res, userID, isWebsocket, path, next);
}
});
}
var ProxyRouter = function(options) {
if (!options.backend) {
throw "ProxyRouter backend required. Please provide options.backend parameter!";
}
this.client = options.backend;
};
ProxyRouter.prototype.flush_state_info = function() {
this.state_info = {};
};
ProxyRouter.prototype.clear_user_state = function(userID) {
// console.log("Changing state to: STOPPED" )
if (this.state_info[userID] !== undefined)
this.state_info[userID]['STATE'] = this.stateEnum.STOPPED;
};
ProxyRouter.prototype.set_user_state = function(userID, state) {
// console.log("Changing state to: " + state)
if (this.state_info[userID] === undefined)
this.state_info[userID] = { STATE: state }
else
this.state_info[userID]['STATE'] = state;
};
ProxyRouter.prototype.set_user_last_success = function(userID, s) {
if (this.state_info[userID] === undefined)
this.state_info[userID] = { LASTSUCCESS: s }
else
this.state_info[userID]['LASTSUCCESS'] = s;
};
ProxyRouter.prototype.push_user_error = function(userID, err) {
if (this.state_info[userID] === undefined)
this.state_info[userID] = { ERROR: [err] };
else if (this.state_info[userID]['ERROR'] === undefined)
this.state_info[userID]['ERROR'] = [err] ;
else
this.state_info[userID]['ERROR'].unshift(err);
while(this.state_info[userID]['ERROR'].length > config.app.maxErrorQueue){
this.state_info[userID]['ERROR'].pop();
// The decision could be made using the state machine instead of the
ProxyRouter.prototype.lookup = function(req, res, userID, isWebsocket, path, next) {
console.log("Looking up the path! " + req.path)
if (!req.session.shortSession)
req.session.shortSession = components.shortSession(req.sessionID)
const shortSession = req.session.shortSession
const podName = components.podNameForImageType(config.k8component.imageType, userID, shortSession)
resolvePod(podName, function (err, target) {
if (err) {
console.log(`ERROR ${JSON.stringify(err)}`)
} else {
console.log(`Resolved to ${stringify(target)}`)
next(target);
}
})
};
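// Illustrative sketch (not part of this commit): how a caller is expected to drive lookup().
// `redisClient`, `req` and `res` are hypothetical stand-ins; the actual wiring lives in the
// webserver module changed further down in this commit.
function exampleLookupUsage(redisClient, req, res) {
  const proxyRouter = new ProxyRouter({ backend: redisClient });
  proxyRouter.lookup(req, res, 'some-user', false, req.path, function(target) {
    // target is the { host, port } of the user's pod, ready to hand to the http proxy
    console.log('forwarding to ' + stringify(target));
  });
}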
ProxyRouter.prototype.expire_route = function(hostname, ttl) {
var self = this;
setTimeout(function() {
self.flush_route(hostname);
}, ttl);
};
ProxyRouter.prototype.flush = function() {
this.cache = {};
};
ProxyRouter.prototype.flush_route = function(hostname) {
delete(this.cache[hostname]);
};
ProxyRouter.prototype.expire_route = function(hostname, ttl) {
var self = this;
setTimeout(function() {
self.flush_route(hostname);
}, ttl);
};
return(ProxyRouter);
};
module.exports = ProxyRouter
@@ -4,7 +4,8 @@ const fs = require('fs');
const path = require('path');
const baseDir = path.resolve(__dirname, '..')
const cconfig = config.k8component
const userSettings = require('./userSettings')
const crypto = require('crypto');
var baseRepl = {
baseDir: baseDir
@@ -30,15 +31,14 @@ function loadTemplateInternal(templatePath, next) {
}
const templateCache = require('../safe-memory-cache/map.js')({
limit: cconfig.templateCacheNMax,
maxTTL: cconfig.templateCacheTtlMaxMs,
refreshF: function(key, value, cache) {
loadTemplateInternal(key, function(err, t) {
if (err) {
console.log(`refresh of template ${key} failed with ${JSON.stringify(err)}`)
} else {
cache.set(key,t)
}
})
}
@@ -54,7 +54,7 @@ function loadTemplate(templatePath, next) {
templateCache.set(templatePath, null)
next(err, null)
} else {
templateCache.set(templatePath, t)
next(null, t)
}
})
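// Illustrative sketch (not part of this commit): the safe-memory-cache wrapper used for
// templateCache above (and for resolveCache in ProxyRouter.js) is only driven here through
// get/set and the limit/maxTTL/refreshF options; refreshF fires with (key, value, cache) and
// may re-set the entry (as the refresh above does after re-reading the file). The key and
// value below are hypothetical.
function exampleCacheUsage() {
  const demoCache = require('../safe-memory-cache/map.js')({
    limit: 10,
    maxTTL: 1000,
    refreshF: function(key, value, cache) {
      cache.set(key, value); // keep the entry when it is about to be refreshed
    }
  });
  demoCache.set('some/template.yaml', 'template text');
  return demoCache.get('some/template.yaml');
}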
@@ -81,38 +81,92 @@ function namespaceTemplate(name, next) {
evalTemplate("namespace.yaml", { namespace: name }, next)
}
/// Returns the template corresponding to the given image type
function templateForImageType(imageType, user, extraRepl, next) {
/// returns a short session ID from a long session id
function shortSession(sessionID) {
const hash = crypto.createHash('sha512');
hash.update(sessionID)
return hash.digest('base64').slice(0,14).replace(/\+/g,'-').replace(/\//g,'_')
}
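// Illustrative sketch (not part of this commit): shortSession derives a fixed-length,
// 14-character token from an arbitrarily long Express session id, so pod names stay at a
// predictable length. The session id below is hypothetical.
function exampleShortSession() {
  const token = shortSession('hypothetical.express.session.id');
  console.log(token, token.length); // token.length is always 14
}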
/// returns the name of the pod for the given user/session
function podNameForImageType(imageType, user, shortSession) {
var session = cconfig.images[imageType].containerPerSession
if (session !== true && session !== false)
session = cconfig.containerPerSession
if (session)
return `${imageType}-${user}-${shortSession}`
else
return `${imageType}-${user}`
}
/// returns the keys (user,...) for the given pod name
function infoForPodName(podName) {
const imageType = podName.slice(0,podName.indexOf('-'))
var session = cconfig.images[imageType].containerPerSession
if (session !== true && session !== false)
session = cconfig.containerPerSession
if (session)
return {
imageType: imageType,
user: podName.slice(imageType.length + 1, podName.length - 15),
shortSession: podName.slice(podName.length - 14)
}
else
return {
imageType: imageType,
user: podName.slice(imageType.length + 1)
}
}
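// Illustrative sketch (not part of this commit): podNameForImageType and infoForPodName are
// meant to round-trip. The user name and session token are hypothetical, and containerPerSession
// is assumed to be enabled for the image type so the session-qualified form is used.
function examplePodNaming() {
  const name = podNameForImageType('jupyter', 'alice', 'Ab12Cd34Ef56Gh');
  // name === 'jupyter-alice-Ab12Cd34Ef56Gh'
  const info = infoForPodName(name);
  // info === { imageType: 'jupyter', user: 'alice', shortSession: 'Ab12Cd34Ef56Gh' }
  console.log(name, JSON.stringify(info));
}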
/// gives the replacements for the image type and user
function replacementsForImageType(imageType, user, shortSession, extraRepl, next) {
var repl = {}
var keysToProtect = new Set()
var toSkip
function addRepl(dict) {
if (dict && dict.keysToSkip)
toSkip = new Set([...keysToProtect, ...dict.keysToSkip])
else
toSkip = new Set(keysToProtect)
for (k in dict)
if (!toSkip.has(k))
repl[k] = dict[k]
if (dict && dict.keysToProtect)
for (k in dict.keysToProtect)
keysToProtect.add(k)
}
addRepl(cconfig)
addRepl(cconfig.images[imageType])
const userRepl = userSettings.getAppSetting(user, 'image:' + imageType)
addRepl(userRepl)
// extraRepl overrides even protected values
if (extraRepl)
for (k in extraRepl)
repl[k] = extraRepl[k]
// the "real" user, imageType and podName override everything
repl['user'] = user
repl['imageType'] = imageType
repl['shortSession'] = shortSession
repl['podName'] = podNameForImageType(imageType, user, shortSession)
next(null, repl)
}
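// Illustrative sketch (not part of this commit): the precedence of replacement sources, from
// lowest to highest, is the k8component defaults, the per-image settings (cconfig.images[...]),
// the per-user settings, extraRepl, and finally the fixed user/imageType/shortSession/podName
// keys set at the end. The user, session token and port override below are hypothetical.
function exampleReplacements() {
  replacementsForImageType('jupyter', 'alice', 'Ab12Cd34Ef56Gh', { port: 9999 }, function(err, repl) {
    // repl.port === 9999 (extraRepl wins over the per-image default)
    // repl.podName is always derived via podNameForImageType and is never overridden
    console.log(JSON.stringify(repl, null, 2));
  });
}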
function templateForImageType(imageType, user, shortSession, extraRepl, next) {
replacementsForImageType(imageType, user, shortSession, extraRepl, function(err, repl) {
if (err)
next(err, null, null)
else
evalTemplate(repl['templatePath'], repl, next)
})
}
module.exports = {
evalTemplate: evalTemplate,
namespaceTemplate: namespaceTemplate,
shortSession: shortSession,
replacementsForImageType: replacementsForImageType,
podNameForImageType: podNameForImageType,
infoForPodName: infoForPodName,
templateForImageType: templateForImageType
}
//const config = require('config')
//const redis = require('redis')
/// Returns the settings key of the given app and user
function getSetting(app, user, key) {
return {}
}
/// Returns the settings of this app for the given user and key
function getAppSetting(user, key) {
return getSetting('container-manager', user, key)
}
module.exports = {
getSetting: getSetting,
getAppSetting: getAppSetting
}
@@ -109,12 +109,9 @@ module.exports = function(env,config, models, cmds) {
if (cmds.includes('webserver')) {
console.log('starting webserver')
const k8 = require('./kubernetes')(config);
const k8component = require('./components')(config);
const ProxyRouter = require('./ProxyRouter')
const proxyRouter = new ProxyRouter({
backend: client
});
//Agent is needed to keep the connection: keep-alive header. But we are not using it until really needed.
//const agent = new http.Agent({ maxSockets: Number.MAX_VALUE });
......
@@ -5,7 +5,8 @@ app: {
localOverride: "/localoverride"
maxErrorQueue: 10
redisTimeout: 3600
resolveCacheTtlMaxMs: 30000
resolveCacheNMax: 30000
pageReloadTime: 5
frontendAddr: "http://127.0.0.1:4200"
baseUri: "http://127.0.0.1"
@@ -27,9 +28,9 @@ usersettings_redis: {
k8Api: {
url: "https://labdev3-nomad.esc.rzg.mpg.de:6443"
node: "labdev3-nomad.esc.rzg.mpg.de"
ca: "/usr/src/app/certs/ca.cert"
cert: "/nomad/nomadlab/servers/labdev-nomad/kubernetes/kubelet-client.crt"
key: "/nomad/nomadlab/servers/labdev-nomad/kubernetes/kubelet-client.key"
ca: "certs/ca.crt"
cert: "certs/client.crt"
key: "certs/client.key"
}
passport: {
strategy: "saml"
@@ -48,26 +49,31 @@ k8component: {
templatePath: "defaultTemplate.yaml"
keysToProtect: ["keysToProtect", "keysToSkip"]
keysToSkip: ["templateCacheTtlMaxMs", "templateCacheNMax", "keysToSkip", "keysToProtect", "images"]
containerPerSession: false
images: {
beaker: {
keysToProtect: ["containerPerSession"]
image: "labdev-nomad.esc.rzg.mpg.de:5000/nomadlab/notebook:v1.8.0-214-gdd60aa28-dirty",
port: 8801,
prefix: "/beaker",
homePath: "/home/beaker"
},
jupyter: {
keysToProtect: ["containerPerSession"]
image: "labdev-nomad.esc.rzg.mpg.de:5000/nomadlab/notebook-jupyter-libatoms-tutorial:v0.4",
port: 8888,
prefix: "/jupyter",
homePath: "/home/beaker"
},
creedo: {
keysToProtect: ["containerPerSession"]
image: "labdev-nomad.esc.rzg.mpg.de:5000/nomadlab/creedo:v0.4.2-2017-09-29",
port: 8080,
prefix: "/Creedo",
homePath: "/home/creedo"
},
remotevis: {
keysToProtect: ["containerPerSession"]
image: "labdev-nomad.esc.rzg.mpg.de:5000/nomadlab/notebook-jupyter-libatoms-tutorial",
port: 8888,
prefix: "/jupyter",
......
version=$(git describe --tags --always --dirty)
name="labdev-nomad.esc.rzg.mpg.de:5000/nomadlab/nomad-container-manager-$version"
docker build -t $name .
docker push $name
cat >container-manager-namespace.yaml <<EOF
kind: Namespace
apiVersion: v1
metadata:
name: analytics
EOF
cat >container-manager-deploy.yaml <<HERE
apiVersion: apps/v1beta2
kind: Deployment
metadata:
name: nomad-container-manager
namespace: analytics
labels:
app: nomad-container-manager
spec:
replicas: 1
selector:
matchLabels:
app: nomad-container-manager
template:
metadata:
labels:
app: nomad-container-manager
spec:
containers:
- name: nomad-container-manager
image: $name
ports:
- containerPort: 80
HERE
cat >container-manager-service.yaml <<HERE
kind: Service
apiVersion: v1
metadata:
name: nomad-container-manager
spec:
selector:
app: nomad-container-manager
ports:
- protocol: TCP
port: 80
targetPort: 80
type: NodePort
HERE
echo "* For an initial deployment, launch with (choose configuration accordingly):"
echo "kubectl --kubeconfig /nomad/nomadlab/kubernetes/dev/config create -f container-manager-service.yaml"
echo "kubectl --kubeconfig /nomad/nomadlab/kubernetes/dev/config create -f container-manager-deploy.yaml"
echo "* To siply update the deployment, just update the Kubernetes deployment without touching the service:"
echo "kubectl --kubeconfig /nomad/nomadlab/kubernetes/dev/config delete -f container-manager-deploy.yaml"
echo "kubectl --kubeconfig /nomad/nomadlab/kubernetes/dev/config create -f container-manager-deploy.yaml"
---
{
"apiVersion": "v1",
"kind": "ReplicationController",
"metadata": {
"name": imageType + "-rc-" + user,
"labels": {
"user": user,
"app": imageType
},
},
"spec": {
"replicas": 1,
"selector":{
"user": user,
"app": imageType
},
"template": {
"metadata": {
"labels": {
"user":user,
"app": imageType
}
},
"spec": {
"containers": [
{
"image": imageInfo.image,
"name": imageType,
"ports": [
{
"containerPort": imageInfo.port,
"name": "main-port",
"protocol": "TCP"
}
],
"imagePullPolicy": "IfNotPresent",
"volumeMounts": [
{
"mountPath": "/raw-data",
"name": "raw-data-volume",
"readOnly": true
},
{
"mountPath": "/parsed",
"name": "parsed-data-volume",
"readOnly": true
},
{
"mountPath": "/normalized",
"name": "normalized-data-volume",
"readOnly": true
},
{
"mountPath": imageInfo.homePath+"/notebooks",
"name": "notebooks-data-volume",
"readOnly": true
},
{
"mountPath": config.userInfo.privateDirInContainer+ "/" + user,
"name": "private-data-volume"
},
{
"mountPath": config.userInfo.sharedDirInContainer,
"name": "shared-data-volume",
"readOnly": true
},
{
"mountPath": config.userInfo.sharedDirInContainer + "/" + user,
"name": "my-shared-data-volume"
}
]
}
]
,
volumes: [
{
"name": "parsed-data-volume",
"hostPath": { "path": "/nomad/nomadlab/parsed" }
},
{
"name": "raw-data-volume",
"hostPath": { "path": "/nomad/nomadlab/raw-data"}
},
{
"name": "normalized-data-volume",
"hostPath": { "path": "/nomad/nomadlab/normalized" }
},
{
"name": "notebooks-data-volume",
"hostPath": { "path": "/nomad/nomadlab/beaker-notebooks/notebooks" }
},
{
"name": "private-data-volume",
"hostPath": { "path": config.userInfo.privateDir + '/' + user }
},
{
"name": "shared-data-volume",
"hostPath": { "path": config.userInfo.sharedDir }
},
{
"name": "my-shared-data-volume",
"hostPath": { "path": config.userInfo.sharedDir + '/' + user }
}
]
}
}
}
};
apiVersion: "v1"
kind: "ReplicationController",
kind: Pod
labels:
user: "{{user}}"
app: "{{imageType}}"
spec:
containers:
- image: "{{image}}"
name: "{{imageType}}"
ports:
- containerPort: "{{port}}"
name: "main-port"
protocol: "TCP"
imagePullPolicy: "IfNotPresent"
volumeMounts:
- mountPath: "/raw-data"
name: "raw-data-volume"
readOnly: true
- mountPath: "/parsed"
name: "parsed-data-volume"
readOnly: true
- mountPath: "/normalized"
name: "normalized-data-volume"
readOnly: true
- mountPath: "{{homePath}}/notebooks"
name: notebooks-data-volume
readOnly: true
- mountPath: "/data/private/{{user}}"
name: "private-data-volume"
- mountPath: "/data/shared"
name: "shared-data-volume"
readOnly: true
- mountPath: "/data/shared/{{user}}"
name: "my-shared-data-volume"
volumes:
- name: "parsed-data-volume"
hostPath: { "path": "/nomad/nomadlab/parsed" }
- name: "raw-data-volume"
hostPath: { "path": "/nomad/nomadlab/raw-data"}
- name: "normalized-data-volume"
hostPath: { "path": "/nomad/nomadlab/normalized" }
- name: "notebooks-data-volume"
hostPath: { "path": "/nomad/nomadlab/beaker-notebooks/notebooks" }
- name: "private-data-volume"
hostPath: { "path": "{{userDataHostPath}}/private/{{user}}" }
- name: "shared-data-volume"
hostPath: { "path": "{{userDataHostPath}}/shared" }
- name: "my-shared-data-volume"
hostPath: { "path": "{{userDataHostPath}}/shared/{{user}}" }
---
kind: Namespace
apiVersion: v1
metadata:
......
@@ -36,6 +36,7 @@
"hjson": "^3.1.0",
"http-proxy": "^1.15.2",
"jade": "1.11.0",
"js-yaml": "^3.10.0",
"json-stringify-safe": "^5.0.1",
"kubernetes-client": "^3.18.0",
"memory-store": "0.0.1",
......
const repl = require('repl')
const config = require('config');
const k8 = require('./app/kubernetes')(config);
const components = require('./app/components')
var ns = k8.ns(config.k8component.namespace)
function getService() {
ns.service.get(config.k8component.imageType + '-svc-fawzi2')
}
function testComponent() {
components.templateForImageType("beaker", "fawzi2", {'session': 'pippo'}, function(err,data,repl) {
console.log(`err: ${JSON.stringify(err)}, data: ${data}, repl: ${JSON.stringify(repl)}`)
})
}
const r = repl.start('> ');
r.context.config = config
r.context.k8 = k8
r.context.ns = ns
r.context.getService = getService
r.context.components = components
r.context.testComponent = testComponent
safe-memory-cache @ e83ed463
Subproject commit 15c65058f9d1e82e8a5415cfa6e678c2edb9946d
Subproject commit e83ed463f583914124f9f9a489b4ae08a1d8989c