diff --git a/app/ProxyRouter.js b/app/ProxyRouter.js
index 22d3a61dadc5bbb9a800c4bb13ccbc5a3958b8a7..4e3c3dd6fe7325b3bca395bc993d7ff833e86127 100644
--- a/app/ProxyRouter.js
+++ b/app/ProxyRouter.js
@@ -1,277 +1,125 @@
 const stringify = require('json-stringify-safe');
 const http = require('http');
 const fs = require('fs');
-module.exports = function (config, k8, k8component) {
-const reloadMsg = `<html><head><title>Starting up!</title><meta http-equiv="refresh" content="${config.app.pageReloadTime}" ></head><body><h3>Please wait while we start a container for you!</h3>
-<p>You might need to refresh manually (F5)...</body></html>`;
-var ProxyRouter = function(options) {
-  if (!options.backend) {
-    throw "ProxyRouter backend required. Please provide options.backend parameter!";
-  }
-
-  this.client    = options.backend;
-  this.cache_ttl = (options.cache_ttl || 10) * 1000;
-  this.cache     = {};
-  this.state_info = {};
-  //State is only for debug and not used at the moment
-  this.stateEnum = {
-    STARTING: 1,
-    STARTED: 2,
-    AVAILABLE: 3,
-    STOPPED: 4
-  };
-  if (Object.freeze)
-    Object.freeze(this.stateEnum);
-  console.log("ProxyRouter cache TTL is set to " + this.cache_ttl + " ms.");
-};
+const components = require('./components');
+const yaml = require('js-yaml')
+const k8 = require('./kubernetes')(config);
 
-ProxyRouter.prototype.kubernetesServiceLookup = function(req, res, userID, isWebsocket, path, next) { //Kubernetes service names are based
-  var self = this;
+const reloadMsg = `<html><head><title>Starting up!</title><meta http-equiv="refresh" content="${config.app.pageReloadTime}" ></head><body><h3>Please wait while we start a container for you!</h3><p>You might need to refresh manually (F5)...</body></html>`;
 
-  function createService(userID) {
-    console.log("trying to create service for the user: " + userID);
-    self.set_user_state(userID,self.stateEnum.STARTING);
-    k8.ns(config.k8component.namespace).service.get(config.k8component.imageType + '-svc-'+userID,function(err, result) {
-      if(err)
-        k8.ns(config.k8component.namespace).service.post({ body: k8component('service', userID)}, function(err, result){
-          if(err){
-            console.log(`#ERROR# Cannot start service ${config.k8component.imageType} for the user: ${userID}`);
-            console.log(stringify(err, null, 2));
-          }
-          else
-            createReplicationController(userID);
+  function guaranteeDir(path, next) {
+    fs.access(path, fs.constants.F_OK | fs.constants.R_OK, (err) => {
+      if(err){
+        fs.mkdir(path, parseInt('2775', 8), (err) => {
+          if(err) throw err;
+          fs.chown(path, 1000, 1000, (err) => {
+            if (err)
+              console.log('Dir '+ path + ' created, error in chown: ' + JSON.stringify(err));
+            else
+              console.log('Dir correctly created:' + path);
+            next();
+          });
         });
-      else
-        createReplicationController(userID);
+      } else {
+        next();
+      }
     });
-  };
-
-  function guaranteeDir(path, next) {
-      fs.access(path, fs.constants.F_OK | fs.constants.R_OK, (err) => {
-          if(err){
-              fs.mkdir(path, parseInt('2775', 8), (err) => {
-                  if(err) throw err;
-                  fs.chown(path, 1000, 1000, (err) => {
-                      if (err)
-                          console.log('Dir '+ path + ' created, error in chown: ' + JSON.stringify(err));
-                      else
-                          console.log('Dir correctly created:' + path);
-                      next();
-                  });
-              });
-          } else {
-              next();
-          }
-      });
   }
 
   function guaranteeUserDir(userID, next) {
-      //Async version needs to be tested thorougly
-      guaranteeDir(config.userInfo.sharedDir + '/' + userID, function() {
-          guaranteeDir(config.userInfo.privateDir + '/' + userID, function() {
-              next();
-          });
+    //Async version needs to be tested thoroughly
+    guaranteeDir(config.userInfo.sharedDir + '/' + userID, function() {
+      guaranteeDir(config.userInfo.privateDir + '/' + userID, function() {
+        next();
       });
+    });
   }
 
-  function createReplicationController(userID) {
-    console.log("trying to create replication controller for user: " + userID);
-//	createUserDir(userID); //Kubernetes can handle it, but the permissions can be problamatic
-    k8.ns(config.k8component.namespace).replicationcontrollers.get(config.k8component.imageType + '-rc-' + userID, function(err, result) {
-      if(err)
-        k8.ns(config.k8component.namespace).replicationcontrollers.post({ body: k8component('replicationController', userID)}, function(err, result){
-          if(err){
-            console.log("#ERROR# Can't start replication controller for the user: " + userID);
-            console.log(stringify(err, null, 2));
+  
+  /// functions that either gives the running pod or starts it
+  function getOrCreatePod(podName, next) {
+    k8.ns(config.k8component.namespace).pod.get(podName, function(err, result) {
+      if(err) {
+        const info = components.infoForPodName(podName)
+        components.templateForImageType(info.imageType, info.user, info.shortSession, {}, function(err, template, repl) {
+          if(err) {
+            console.log(`#ERROR# Cannot start pod ${podName}, error in template generation: ${JSON.stringify(err)}`);
+            next(err, null)
+          } else {
+            //fs.writeFile("pod.yaml", template)
+            //console.log("wrote evaulated template to pod.yaml")
+            guaranteeUserDir(info.user, function (){
+              k8.ns(config.k8component.namespace).service.post({ body: yaml.safeLoad(template, 'utf8')}, function(err, res2){
+                if(err) {
+                  console.log(`#ERROR# Cannot start pod ${podName}, error: ${JSON.stringify(err)}`);
+                  next(err, null)
+                } else {
+                  console.log(`Created pod ${podName}: ${JSON.stringify(res2)}`)
+                  next(null, res2)
+                }
+              })
+            })
           }
-          else
-          getServicePort(userID);
         });
-      else
-        getServicePort(userID);
-    });
-  };
-
-  function getServicePort(userID) {
-    console.log("trying to get service part for user: " + userID);
-    self.set_user_state(userID,self.stateEnum.STARTED);
-    k8.ns(config.k8component.namespace).service.get(config.k8component.imageType + '-svc-' + userID,function(err, result) {
-      if(err){
-        console.log("#ERROR# Can't find service for the user: " + userID);
-        console.log(stringify(err, null, 2));
+      } else {
+        console.log(`looked up ${podName}: ${JSON.stringify(result)}`)
+        next(null, result)
       }
-      else{
-          var target= {host: config.k8Api.node, port: result.spec.ports[0].nodePort};
-          console.log(`Resolved using kubernetes to ${stringify(target)}`)
-          var writeTarget = stringify(target);
-          var cb = function(){
-            http.request({method:'HEAD',host:target.host,port:target.port,path: '/'}, (r) => {
-                if(r.statusCode >= 200 && r.statusCode < 400 ){
-                  console.log("Forwarding to the target!")
-                  self.set_user_state(userID,self.stateEnum.AVAILABLE);
-                  self.set_user_last_success(userID, Date.now());
-                  next(target);
-                }
-            }).setTimeout(1000).on('error', (err) => {
-              self.push_user_error(userID, err.message);
-              self.clear_user_state(userID);
-              console.log("Sending message back to the browser!")
-              if (!isWebsocket){
-                res.send(reloadMsg);
-              }
-            }).end();
-          }
-          self.client.hset(userID, path, writeTarget,cb);
-        }
-    });
-  };
-  function writeToDisk(userID){
-    fs.writeFile("service.json", stringify(k8component('service', userID), null, 2), function(err) {
-        if(err) {
-            return console.log(err);
-        }
-        console.log("The service file was saved!");
-    });
-    fs.writeFile("rc.json", stringify(k8component('replicationController', userID), null, 2), function(err) {
-        if(err) {
-            return console.log(err);
-        }
-        console.log("The rc file was saved!");
     });
   }
-//    writeToDisk(userID);
-    guaranteeUserDir(userID, function() { createService(userID) });
-};
-// The decision could be made using the state machine instead of the
-ProxyRouter.prototype.lookup = function(req, res, userID, isWebsocket, path, next) {
-  console.log("Looking up the path! " + req.path)
-  var self = this;
-  if ( self.cache[userID] && self.cache[userID][path]) {
-    var target = self.cache[userID][path];
-    console.log(`Resolved using local cache to ${stringify(target)}`)
-    next(target);
-  }
-    else {
-	if (config.specialUsers[userID]) {
-	    target = config.specialUsers[userID]
-       		    // Set cache and expiration
-            if (self.cache[userID] === undefined){
-	      self.cache[userID] = { path: target }
-	    } else {
-	      self.cache[userID][path] = target;
-            }
-            console.log(`Forwarding special user ${userID} to ${stringify(target)}!`);
-            next(target);
-	} else {
-	    //Check if the path has been defined in the redis client otherwise get it from kubernetes
-	    self.client.hget(userID, path, function(err, data) {
-		if (data) {
-		    var target = JSON.parse(data);
-		    console.log(`Resolved using redis cache to ${stringify(target)}`)
-		    // Set cache and expiration
-		    if (self.cache[userID] === undefined){
-			self.cache[userID] = { path: target }
-		    }
-		    else{
-			self.cache[userID][path] = target;
-		    }
-		    self.expire_route(userID, self.cache_ttl);
-		    http.request({method:'HEAD',host:target.host,port:target.port,path: '/'}, (r) => {
-			if(r.statusCode >= 200 && r.statusCode < 500 ){
-			    console.log("Forwarding to the target!");
-			    self.set_user_state(userID,self.stateEnum.AVAILABLE);
-			    next(target);
-			} else {
-			    self.expire_route(userID, 0);
-			    self.push_user_error(userID, `ERROR, statusCode: ${r.statusCode}, err: ${JSON.stringify(err)}`);
-			    self.clear_user_state(userID);
-			    self.client.hdel(userID, path, () =>{});
-			    console.log(`ERROR, statusCode: ${r.statusCode}, err: ${JSON.stringify(err)}, Sending message back to the browser!`)
-			    if (!isWebsocket) {
-				res.send(reloadMsg);
-			    }
-			}
-		    }).setTimeout(1000).on('error', (err) => {
-			self.expire_route(userID, 0);
-			self.push_user_error(userID, err.message);
-			self.clear_user_state(userID);
-			self.client.hdel(userID, path, () =>{});
-			console.log("From error! Sending message back to the browser!")
-			if (!isWebsocket) {
-			    res.send(reloadMsg);
-			}
-		    }).end();
-
-		} else { //Else of path check in redis client
-		    //Lookup target from Kubernetes
-		    console.log(`Cant resolve using redis cache!!`)
-		    self.kubernetesServiceLookup(req, res, userID, isWebsocket, path, next);
-		}
-	    });
-	}
-  }
-};
-
-
-ProxyRouter.prototype.flush_state_info = function() {
-  this.state_info = {};
-};
-
-ProxyRouter.prototype.clear_user_state = function(userID) {
-//  console.log("Changing state to: STOPPED" )
-  if (!this.state_info[userID] === undefined)
-    this.state_info[userID]['STATE'] = this.stateEnum.STOPPED;
-};
-
-ProxyRouter.prototype.set_user_state = function(userID, state) {
-//  console.log("Changing state to: " + state)
-  if (this.state_info[userID] === undefined)
-    this.state_info[userID] = { STATE: state }
-  else
-    this.state_info[userID]['STATE'] = state;
-};
-
-ProxyRouter.prototype.set_user_last_success = function(userID, s) {
-
-  if (this.state_info[userID] === undefined)
-    this.state_info[userID] = { LASTSUCCESS: s }
-  else
-    this.state_info[userID]['LASTSUCCESS'] = s;
-};
-
-ProxyRouter.prototype.push_user_error = function(userID, err) {
-  if (this.state_info[userID] === undefined)
-    this.state_info[userID] = { ERROR: [err] };
-  else if (this.state_info[userID]['ERROR'] === undefined)
-    this.state_info[userID]['ERROR'] = [err] ;
-  else
-    this.state_info[userID]['ERROR'].unshift(err);
-  while(this.state_info[userID]['ERROR'].length > config.app.maxErrorQueue){
-    this.state_info[userID]['ERROR'].pop();
+  
+  // cache pod name -> host & port
+  const resolveCache = require('../safe-memory-cache/map.js')({
+    limit: config.resolveCacheNMax,
+    maxTTL: config.resolveCacheTtlMaxMs,
+    refreshF: function(key, value, cache) {
+    }
+  })
+  
+  function resolvePod(podName, next) {
+    var v = resolveCache.get(podName)
+    if (v === undefined) {
+      getOrCreatePod(podName, function (err, pod) {
+        if (err) {
+          next(err, null)
+        } else {
+          const portInfo = pod.spec.containers[0].ports[0]
+          const res = {
+            host: portInfo.hostIP,
+            port: portInfo.containerPort
+          }
+          console.log(`got ${JSON.stringify(res)} out of pod ${JSON.stringify(pod)}`)
+          resolveCache.set(podName, res)
+          next(null, res)
+        }
+      })
+    } else {
+      next(null, v)
+    }
   }
-};
 
-ProxyRouter.prototype.expire_route = function(hostname, ttl) {
-  var self = this;
-  setTimeout(function() {
-    self.flush_route(hostname);
-  }, ttl);
-};
-
-ProxyRouter.prototype.flush = function() {
-  this.cache = {};
-};
-
-ProxyRouter.prototype.flush_route = function(hostname) {
-  delete(this.cache[hostname]);
-};
+  var ProxyRouter = function(options) {
+    if (!options.backend) {
+      throw "ProxyRouter backend required. Please provide options.backend parameter!";
+    }
+    this.client    = options.backend;
+  };
 
-ProxyRouter.prototype.expire_route = function(hostname, ttl) {
-  var self = this;
-  setTimeout(function() {
-    self.flush_route(hostname);
-  }, ttl);
-};
+  // The decision could be made using the state machine instead of the
+  ProxyRouter.prototype.lookup = function(req, res, userID, isWebsocket, path, next) {
+    console.log("Looking up the path! " + req.path)
+    if (!req.session.shortSession)
+      req.session.shortSession = components.shortSession(req.sessionID)
+    const shortSession = req.session.shortSession
+    const podName = components.podNameForImageType(config.k8component.imageType, userID, shortSession)
+    resolvePod(podName, function (err, target) {
+      if (err) {
+        console.log(`ERROR ${JSON.stringify(err)}`)
+      } else {
+        console.log(`Resolved to ${stringify(target)}`)
+        next(target);
+      }
+    })
+  };
 
-return(ProxyRouter);
-};
+module.exports = ProxyRouter
diff --git a/app/components.js b/app/components.js
index 6e4864957c23ccbeabf532e665885719881a23ea..65959d4a9871b6e61f13c8757e60ce26e3dc9d4b 100644
--- a/app/components.js
+++ b/app/components.js
@@ -4,7 +4,8 @@ const fs = require('fs');
 const path = require('path');
 const baseDir = path.resolve(__dirname, '..')
 const cconfig = config.k8component
-const userSettings = require('userSettings')
+const userSettings = require('./userSettings')
+const crypto = require('crypto');
 
 var baseRepl = {
   baseDir: baseDir
@@ -30,15 +31,14 @@ function loadTemplateInternal(templatePath, next) {
 }
 
 const templateCache = require('../safe-memory-cache/map.js')({
-  limit = cconfig.templateCacheNMax,
-  opts.maxTTL = cconfig.templateCacheTtlMaxMs,
-  refreshF = function(key, value) {
+  limit: cconfig.templateCacheNMax,
+  maxTTL: cconfig.templateCacheTtlMaxMs,
+  refreshF: function(key, value, cache) {
     loadTemplateInternal(key, function(err, t) {
       if (err) {
-        console.log(`refresh of template ${key} failed with ${JSON.stringify(err)}, keeping old value`)
-        this.set(key, value)
+        console.log(`refresh of template ${key} failed with ${JSON.stringify(err)}`)
       } else {
-        this.set(key,t)
+        cache.set(key,t)
       }
     })
   }
@@ -54,7 +54,7 @@ function loadTemplate(templatePath, next) {
         templateCache.set(templatePath, null)
         next(err, null)
       } else {
-        this.set(templatePath, t)
+        templateCache.set(templatePath, t)
         next(null, t)
       }
     })
@@ -81,38 +81,92 @@ function namespaceTemplate(name, next) {
   evalTemplate("namespace.yaml", { namespace: name }, next)
 }
 
-/// Returns the template corrsponding 
-function templateForImageType(imageType, user, extraRepl, next) {
+/// returns a short session ID from a long session id
+function shortSession(sessionID) {
+  const hash = crypto.createHash('sha512');
+  hash.update(req.sessionID)
+  return hash.digest('base64').slice(0,14).replace('+','-').replace('/','_')
+}
+
+/// returns the name of the pod for the give user/session
+function podNameForImageType(imageType, user, shortSession) {
+  var session = cconfig.images[imageType].containerPerSession
+  if (session !== true && session !== false)
+    session = cconfig.containerPerSession
+  if (session)
+    return `${imageType}-${user}-${shortSession}`
+  else
+    return`${imageType}-${user}`
+}
+
+/// returns the keys (user,...) for the given pod name
+function infoForPodName(podName) {
+  const imageType = podName.slice(0,podName.indexOf('-'))
+  var session = cconfig.images[imageType].containerPerSession
+  if (session !== true && session !== false)
+    session = cconfig.containerPerSession
+  if (session)
+    return {
+      imageType: imageType,
+      user: podName.slice(imageType.length + 1, podName.length - 15),
+      shortSession: podName.slice(podName.length - 14)
+    }
+  else
+    return {
+      imageType: imageType,
+      user: podName.slice(imageType.length + 1)
+    }
+}
+
+/// gives the replacements for the image type and user
+function replacementsForImageType(imageType, user, shortSession, extraRepl, next) {
   var repl = {}
   var keysToProtect = new Set()
   var toSkip
   function addRepl(dict) {
-    if (dict.keysToSkip)
+    if (dict && dict.keysToSkip)
       toSkip = new Set([...keysToProtect, ...dict.keysToSkip])
     else
       toSkip = new Set(keysToProtect)
-    for (k in cconfig)
+    for (k in dict)
       if (!toSkip.has(k))
-        rep[k] = config[k]
-    if (dict.keysToProtect)
+        repl[k] = dict[k]
+    if (dict && dict.keysToProtect)
       for (k in dict.keysToProtect)
         keysToProtect.add(k)
   }
   addRepl(cconfig)
-  addRepl(cconfig[imageType])
-  const userRepl = userSettings.getSettings(user, 'image:' + imageType)
+  addRepl(cconfig.images[imageType])
+  const userRepl = userSettings.getAppSetting(user, 'image:' + imageType)
   addRepl(userRepl)
   // extraRepl overrides even protected values
-  for (k <- extraRepl)
-    repl[k] = userRepl[k]
-  // and the "real" user overrided everything
+  if (extraRepl)
+    for (k in extraRepl)
+      repl[k] = extraRepl[k]
+  // the "real" user, imageType and podName override everything
   repl['user'] = user
+  repl['imageType'] = imageType
+  repl['shortSession'] = shortSession
+  repl['podName'] = podNameForImageType(imageType, user, shortSession)
+
+  next(null, repl)
+}
 
-  evalTemplate(repl['templatePath'], repl, next)
+function templateForImageType(imageType, user, shortSession, extraRepl, next) {
+  replacementsForImageType(imageType, user, shortSession, extraRepl, function(err, repl) {
+    if (err)
+      next(err, null, null)
+    else
+      evalTemplate(repl['templatePath'], repl, next)
+  })
 }
 
 module.exports = {
   evalTemplate: evalTemplate,
   namespaceTemplate: namespaceTemplate,
+  shortSession: shortSession,
+  replacementsForImageType: replacementsForImageType,
+  podNameForImageType: podNameForImageType,
+  infoForPodName: infoForPodName,
   templateForImageType: templateForImageType
 }
diff --git a/app/userSettings.js b/app/userSettings.js
index 4c2c15d25a6c1c35ceec3c3089df4ee83b79f993..d8e3fc604bd4ad3807c5d52e44d35e52d7a3b25a 100644
--- a/app/userSettings.js
+++ b/app/userSettings.js
@@ -1,14 +1,17 @@
 //const config = require('config')
 //const redis = require('redis')
 
-function userSettings(app, user, key) {
+/// Returns the settings key of the given app and user
+function getSetting(app, user, key) {
   return {}
 }
 
-module.exports = {
-  getOtherSetting: userSettings
+/// Returns the settings of this app for the given user and key
+function getAppSetting(user, key) {
+  getSetting('container-manager', user, key)
+}
 
-  getSetting: function(user, key) {
-    userSettings('container-manager', user, key)
-  }
+module.exports = {
+  getSetting: getSetting,
+  getAppSetting: getAppSetting
 }
diff --git a/app/webserver.js b/app/webserver.js
index b2656bb0fcc948d29be37d78a113b9c5ecefc434..a6812ae1d1feb4f9fd324bb0a5a580047002e956 100644
--- a/app/webserver.js
+++ b/app/webserver.js
@@ -109,12 +109,9 @@ module.exports = function(env,config, models, cmds) {
 
   if (cmds.includes('webserver')) {
     console.log('starting webserver')
-    const k8 = require('./kubernetes')(config);
-    const k8component = require('./components')(config);
-    const ProxyRouter = require('./ProxyRouter')(config,k8, k8component)
+    const ProxyRouter = require('./ProxyRouter')
     const proxyRouter = new ProxyRouter({
-      backend: client,
-      cache_ttl: 10
+      backend: client
     });
     //Agent is need to keep the connection: keep-alive header. But we not using it until really needed.
     //const agent = new http.Agent({ maxSockets: Number.MAX_VALUE });
diff --git a/config/default.hjson b/config/default.hjson
index 2e17dfaeda69c4873ee60474a20846169aca52ab..57bbfe22ec4c5f7878f63b3a236cecdce372ec80 100644
--- a/config/default.hjson
+++ b/config/default.hjson
@@ -5,7 +5,8 @@ app: {
   localOverride: "/localoverride"
   maxErrorQueue: 10
   redisTimeout: 3600
-  localCacheTimeout: 10
+  resolveCacheTtlMaxMs: 30000
+  resolveCacheNMax: 30000
   pageReloadTime: 5
   frontendAddr: "http://127.0.0.1:4200"
   baseUri: "http://127.0.0.1"
@@ -27,9 +28,9 @@ usersettings_redis: {
 k8Api: {
   url: "https://labdev3-nomad.esc.rzg.mpg.de:6443"
   node: "labdev3-nomad.esc.rzg.mpg.de"
-  ca: "/usr/src/app/certs/ca.cert"
-  cert: "/nomad/nomadlab/servers/labdev-nomad/kubernetes/kubelet-client.crt"
-  key: "/nomad/nomadlab/servers/labdev-nomad/kubernetes/kubelet-client.key"
+  ca: "certs/ca.crt"
+  cert: "certs/client.crt"
+  key: "certs/client.key"
 }
 passport: {
   strategy: "saml"
@@ -48,26 +49,31 @@ k8component: {
   templatePath: "defaultTemplate.yaml"
   keysToProtect: ["keysToProtect", "keysToSkip"]
   keysToSkip: ["templateCacheTtlMaxMs", "templateCacheNMax", "keysToSkip", "keysToProtect", "images"]
+  containerPerSession: false
   images: {
     beaker: {
+      keysToProtect: ["containerPerSession"]
       image: "labdev-nomad.esc.rzg.mpg.de:5000/nomadlab/notebook:v1.8.0-214-gdd60aa28-dirty",
       port: 8801,
       prefix: "/beaker",
       homePath: "/home/beaker"
     },
     jupyter: {
+      keysToProtect: ["containerPerSession"]
       image: "labdev-nomad.esc.rzg.mpg.de:5000/nomadlab/notebook-jupyter-libatoms-tutorial:v0.4",
       port: 8888,
       prefix: "/jupyter",
       homePath: "/home/beaker"
     },
     creedo: {
+      keysToProtect: ["containerPerSession"]
       image: "labdev-nomad.esc.rzg.mpg.de:5000/nomadlab/creedo:v0.4.2-2017-09-29",
       port: 8080,
       prefix: "/Creedo",
       homePath: "/home/creedo"
     },
     remotevis: {
+      keysToProtect: ["containerPerSession"]
       image: "labdev-nomad.esc.rzg.mpg.de:5000/nomadlab/notebook-jupyter-libatoms-tutorial",
       port: 8888,
       prefix: "/jupyter",
diff --git a/deploy.sh b/deploy.sh
new file mode 100644
index 0000000000000000000000000000000000000000..d4909342aad1a878b823ff2c56a494e75729448c
--- /dev/null
+++ b/deploy.sh
@@ -0,0 +1,59 @@
+version=$(git describe --tags --always --dirty)
+name="labdev-nomad.esc.rzg.mpg.de:5000/nomadlab/nomad-container-manager-$version"
+docker build -t $name .
+docker push $name
+
+cat >container-manager-namespace.yaml <<EOF
+kind: Namespace
+apiVersion: v1
+metadata:
+  name: analytics
+EOF
+
+cat >container-manager-deploy.yaml <<HERE
+apiVersion: apps/v1beta2
+kind: Deployment
+metadata:
+  name: nomad-container-manager
+  namespace: analytics
+  labels:
+    app: nomad-container-manager
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: nomad-container-manager
+  template:
+    metadata:
+      labels:
+        app: nomad-container-manager
+    spec:
+      containers:
+      - name: nomad-container-manager
+        image: $name
+        ports:
+        - containerPort: 80
+HERE
+
+
+cat >container-manager-service.yaml <<HERE
+kind: Service
+apiVersion: v1
+metadata:
+  name: nomad-container-manager
+spec:
+  selector:
+    app: nomad-container-manager
+  ports:
+  - protocol: TCP
+    port: 80
+    targetPort: 80
+  type: NodePort
+HERE
+
+echo "* For an initial deployment, launch with (choose configuration accordingly):"
+echo "kubectl --kubeconfig /nomad/nomadlab/kubernetes/dev/config create -f container-manager-service.yaml"
+echo "kubectl --kubeconfig /nomad/nomadlab/kubernetes/dev/config create -f container-manager-deploy.yaml"
+echo "* To simply update the deployment, just update the Kubernetes deployment without touching the service:"
+echo "kubectl --kubeconfig /nomad/nomadlab/kubernetes/dev/config delete -f container-manager-deploy.yaml"
+echo "kubectl --kubeconfig /nomad/nomadlab/kubernetes/dev/config create -f container-manager-deploy.yaml"
diff --git a/kubeTemplates/defaultTemplate.yaml b/kubeTemplates/defaultTemplate.yaml
index 494b30110fca7feb3065f947f6b41319612bdc28..c33baba972fe2c0457e6faf238eb2e1498eca28d 100644
--- a/kubeTemplates/defaultTemplate.yaml
+++ b/kubeTemplates/defaultTemplate.yaml
@@ -1,109 +1,50 @@
----
-{
-    "apiVersion": "v1",
-    "kind": "ReplicationController",
-    "metadata": {
-      "name": imageType + "-rc-" + user,
-      "labels": {
-        "user": user,
-        "app": imageType
-      },
-    },
-    "spec": {
-      "replicas": 1,
-      "selector":{
-        "user": user,
-        "app": imageType
-        },
-      "template": {
-        "metadata": {
-          "labels": {
-            "user":user,
-            "app": imageType
-          }
-        },
-        "spec": {
-          "containers": [
-            {
-              "image": imageInfo.image,
-              "name": imageType,
-              "ports": [
-                {
-                  "containerPort": imageInfo.port,
-                  "name": "main-port",
-                  "protocol": "TCP"
-                }
-              ],
-              "imagePullPolicy": "IfNotPresent",
-              "volumeMounts": [
-                {
-                  "mountPath": "/raw-data",
-                  "name": "raw-data-volume",
-                  "readOnly": true
-                },
-                {
-                  "mountPath": "/parsed",
-                  "name": "parsed-data-volume",
-                  "readOnly": true
-                },
-                {
-                  "mountPath": "/normalized",
-                  "name": "normalized-data-volume",
-                  "readOnly": true
-                },
-                {
-                  "mountPath": imageInfo.homePath+"/notebooks",
-                  "name": "notebooks-data-volume",
-                  "readOnly": true
-                },
-                {
-                  "mountPath": config.userInfo.privateDirInContainer+ "/" + user,
-                  "name": "private-data-volume"
-                },
-                {
-                  "mountPath": config.userInfo.sharedDirInContainer,
-                  "name": "shared-data-volume",
-                  "readOnly": true
-                },
-                {
-                  "mountPath": config.userInfo.sharedDirInContainer + "/" + user,
-                  "name": "my-shared-data-volume"
-                }
-              ]
-            }
-          ]
-          ,
-          volumes: [
-            {
-              "name": "parsed-data-volume",
-              "hostPath": { "path": "/nomad/nomadlab/parsed" }
-            },
-            {
-              "name": "raw-data-volume",
-              "hostPath": { "path": "/nomad/nomadlab/raw-data"}
-            },
-            {
-              "name": "normalized-data-volume",
-              "hostPath": { "path": "/nomad/nomadlab/normalized" }
-            },
-            {
-              "name": "notebooks-data-volume",
-              "hostPath": { "path": "/nomad/nomadlab/beaker-notebooks/notebooks" }
-            },
-            {
-              "name": "private-data-volume",
-              "hostPath": { "path": config.userInfo.privateDir + '/' + user }
-            },
-            {
-              "name": "shared-data-volume",
-              "hostPath": { "path": config.userInfo.sharedDir }
-            },
-            {
-              "name": "my-shared-data-volume",
-              "hostPath": { "path": config.userInfo.sharedDir + '/' + user }
-            }
-          ]
-        }
-      }
-    }
-  };
+apiVersion: v1
+kind: Pod
+metadata:
+  name: "{{podName}}"
+  labels:
+    { user: "{{user}}", app: "{{imageType}}" }
+spec:
+  containers:
+  - image: "{{image}}"
+    name: "{{imageType}}"
+    ports:
+    - containerPort: {{port}}
+      name: "main-port"
+      protocol: "TCP"
+    imagePullPolicy: "IfNotPresent"
+    volumeMounts:
+    - mountPath: "/raw-data"
+      name: "raw-data-volume"
+      readOnly: true
+    - mountPath: "/parsed"
+      name: "parsed-data-volume"
+      readOnly: true
+    - mountPath: "/normalized"
+      name: "normalized-data-volume"
+      readOnly: true
+    - mountPath: "{{homePath}}/notebooks"
+      name: notebooks-data-volume
+      readOnly: true
+    - mountPath: "/data/private/{{user}}"
+      name: "private-data-volume"
+    - mountPath: "/data/shared"
+      name: "shared-data-volume"
+      readOnly: true
+    - mountPath: "/data/shared/{{user}}"
+      name: "my-shared-data-volume"
+  volumes:
+  - name: "parsed-data-volume"
+    hostPath: { "path": "/nomad/nomadlab/parsed" }
+  - name: "raw-data-volume"
+    hostPath: { "path": "/nomad/nomadlab/raw-data"}
+  - name: "normalized-data-volume"
+    hostPath: { "path": "/nomad/nomadlab/normalized" }
+  - name: "notebooks-data-volume"
+    hostPath: { "path": "/nomad/nomadlab/beaker-notebooks/notebooks" }
+  - name: "private-data-volume"
+    hostPath: { "path": "{{userDataHostPath}}/private/{{user}}" }
+  - name: "shared-data-volume"
+    hostPath: { "path": "{{userDataHostPath}}/shared" }
+  - name: "my-shared-data-volume"
+    hostPath: { "path": "{{userDataHostPath}}/shared/{{user}}" }
diff --git a/kubeTemplates/namespace.yaml b/kubeTemplates/namespace.yaml
index d0be10c76734bcc2fdc07bae74e5a088fdea6863..f0030d5d254dd2c161f6bf453dc171365d3a0fe1 100644
--- a/kubeTemplates/namespace.yaml
+++ b/kubeTemplates/namespace.yaml
@@ -1,4 +1,3 @@
----
 kind: Namespace
 apiVersion: v1
 metadata:
diff --git a/package.json b/package.json
index 7ab0313ef9323532579213ad3aab3fee735c88c0..0063434ff5d44bfa7b721f6d367dd51095f177f9 100644
--- a/package.json
+++ b/package.json
@@ -36,6 +36,7 @@
     "hjson": "^3.1.0",
     "http-proxy": "^1.15.2",
     "jade": "1.11.0",
+    "js-yaml": "^3.10.0",
     "json-stringify-safe": "^5.0.1",
     "kubernetes-client": "^3.18.0",
     "memory-store": "0.0.1",
diff --git a/repl.js b/repl.js
index 85489d0288750b5fbce67b836200ede8da2ee1e9..7bc7624e60987f5483420012cb050802464d6849 100644
--- a/repl.js
+++ b/repl.js
@@ -1,16 +1,23 @@
 const repl = require('repl')
 const config = require('config');
 const k8 = require('./app/kubernetes')(config);
-const compontents = require('./app/components')
+const components = require('./app/components')
 var ns = k8.ns(config.k8component.namespace)
 
 function getService() {
   ns.service.get(config.k8component.imageType + '-svc-fawzi2')
 }
 
+function testComponent() {
+  components.templateForImageType("beaker", "fawzi2", {'session': 'pippo'}, function(err,data,repl) {
+  console.log(`err: ${JSON.stringify(err)}, data: ${data}, repl: ${JSON.stringify(repl)}`)
+})
+}
+
 const r = repl.start('> ');
 r.context.config = config
 r.context.k8 = k8
 r.context.ns = ns
 r.context.getService = getService
 r.context.components = components
+r.context.testComponent = testComponent
diff --git a/safe-memory-cache b/safe-memory-cache
index 15c65058f9d1e82e8a5415cfa6e678c2edb9946d..e83ed463f583914124f9f9a489b4ae08a1d8989c 160000
--- a/safe-memory-cache
+++ b/safe-memory-cache
@@ -1 +1 @@
-Subproject commit 15c65058f9d1e82e8a5415cfa6e678c2edb9946d
+Subproject commit e83ed463f583914124f9f9a489b4ae08a1d8989c