diff --git a/GridQuality/Kisslinger.py b/GridQuality/Kisslinger.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff3d1527573c731aa1fbe1299a272d6c74ead96a
--- /dev/null
+++ b/GridQuality/Kisslinger.py
@@ -0,0 +1,902 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Mon Dec  2 09:28:09 2019
+
+@author: viwi
+Read and modify Kisslinger meshes
+"""
+
+import numpy as np
+from mpl_toolkits.mplot3d import Axes3D
+import matplotlib.pyplot as plt
+from mayavi import mlab
+
+degtorad = np.pi/180
+
+def cylindertocartesian(r, z, p):
+  '''Convert cylindrical (r, z, phi[rad]) to Cartesian [x, y, z].'''
+  r, z, p = np.array(r), np.array(z), np.array(p)
+  x = r*np.cos(p)
+  y = r*np.sin(p)
+  return [x, y, z]
+
+def load_components(pn='./W7X/'):
+  '''
+  Load components from Kisslinger files
+  '''
+  #List of components to use
+  lcomp = [pn+'WALL/vessel.medium']
+
+  pn = pn+'/PARTS/'
+  tmp = [\
+  'baf_lower_left_0_21',\
+  'baf_lower_left_19_28',\
+  'baf_lower_right',\
+  'baf_upper_left',\
+  'baf_upper_right',\
+  'div_hor_lower',\
+  'div_hor_upper',\
+  'div_ver_upper',\
+  'shield_0_21_fix2.wvn',\
+  'shield_21_28_fix2.wvn',\
+  'shield_28_31p5_fix2.wvn',\
+  'shield_31p5_32_fix2.wvn',\
+  'shield_32_32p5_fix2.wvn',\
+  'shield_32p5_35_fix2.wvn',\
+  'shield_35_36_fix2.wvn',\
+  'cover_lower_chamber_gap',\
+  'cover_lower_gap_0-1.0',\
+  'cover_lower_hor_div',\
+  'cover_lower_phi=21',\
+  'cover_lower_phi=28',\
+  'cover_upper_chamber_gap',\
+  'cover_upper_gap_0-1.5',\
+  'cover_upper_hor_div',\
+  'cover_upper_ver_div',\
+  'cover_upper_phi=19',\
+  'cut_lowerChamber',\
+  'cut_upperChamber'\
+  ]
+  
+  lcol = [(0.7,0.7,0.7)]
+  for i in range(len(tmp)):
+    lcol.append((0.0,0.0,0.0))
+
+  for i,item in enumerate(tmp):
+    lcomp.append(pn+item)
+
+  #Load components
+  lc = []
+  for item in lcomp:
+    print(item)
+    flag = is_upper(item)
+    if flag:
+      lc.append(Kisslinger(item).mirror_component())
+    else:
+      lc.append(Kisslinger(item))
+  return [lc, lcomp,lcol]
+
+def get_color(comp):
+  idx = comp.split('/')[-1]
+  if 'cover_' in idx:  #part names use the prefix 'cover_' (see load_components)
+    col = (0,1,0)
+  elif 'div_' in idx:
+    col = (1,0,0)
+  elif 'baf_' in idx:
+    col = (0,0,1)
+  elif 'shield_' in idx:
+    col = (0.5,0.5,0.5)
+  else:
+    col = (1,1,1)
+  return col
+
+def is_upper(comp):
+  idx = comp.split('/')[-1]
+  return 'upper' in idx
+
+def plot_components(lc,phi=0.,idx=[],fig=None,ax=None):
+  '''
+  Plot components
+  '''
+  if fig is None: fig = plt.gcf()
+  if ax is None: ax = fig.gca()
+  for i,item in enumerate(lc):
+      if i in idx:
+          item.plot_phi_lineout(phi,fig,ax,color='r')
+      else:
+          item.plot_phi_lineout(phi,fig,ax,color='k')
+  plt.draw()
+  return fig
+
+def plot_mayavi_components(lc,op=0.995,skip=[],lcol=None,fig=None):
+  '''
+  Plot components with Mayavi
+  '''
+  if fig is None: fig = mlab.figure()
+  if lcol is None: lcol = [(1,1,1)]*len(lc)
+  for i,item in enumerate(lc):
+      if i not in skip:
+        item.plot_mayavi_mesh(op=op,color=lcol[i],fig=fig)
+  return fig
+
+class Kisslinger():
+    """
+    Read, plot and modify Kisslinger meshes
+
+    input:
+        fn - filename of the mesh
+
+    attributes:
+        Kisslinger.R - radial values of the surface mesh
+        Kisslinger.Z - Z values of the surface mesh
+        Kisslinger.P - phi values of the surface mesh
+    """
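+    # Example usage (illustrative; the path is the one used in load_components):
+    #   k = Kisslinger('./W7X/WALL/vessel.medium')
+    #   fig = k.plot_mayavi_mesh()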
+    def __init__(self, fn):
+        """
+        Read Kisslinger Mesh
+        
+        File structure:
+            
+            First row: description of mesh
+            
+            Second row:
+                First number  - nPhi in mesh (int)
+                Second number - nRZ in mesh (int)
+                Third number  - nfold periodicity (int)
+                Fourth number - R reference point (float)
+                Fifth number  - Z reference point (float)
+                
+            Third row onwards: mesh data
+        """
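+        # Illustrative file layout (values hypothetical):
+        #   vacuum vessel, medium resolution   <- description line
+        #   181 96 5 550.0 0.0                 <- nphi nRZ nfold Rref Zref
+        #   0.00000                            <- phi of the first plane [deg]
+        #   620.00000 10.00000                 <- nRZ rows of "R Z" [cm]
+        #   ...                                   (then the next phi plane)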
+        header=np.loadtxt(fn,skiprows=1, max_rows=1)
+        nphi=int(header[0])
+        nRZ=int(header[1])
+    #    print(' '.join(np.loadtxt(fn,dtype='str',max_rows=1)))
+        print('File: %s' %fn.split('/')[-1])
+        print('Kisslinger mesh with %d Phi values with %d R,Z values per phi.'%(nphi, nRZ))
+        print('%d-fold periodicity'%header[2])
+        print('Reference point: %f [cm] %f [cm] (R,Z)'%(header[3],header[4]))
+        
+        R=np.zeros((nphi,nRZ))
+        Z=np.zeros(np.shape(R))
+        P=np.ones(np.shape(R))
+        for i in range(nphi):
+            P[i,:]=np.multiply(P[i,:],np.loadtxt(fn,skiprows=2+(nRZ+1)*i, max_rows=1))
+            tmp=np.loadtxt(fn,skiprows=3+(nRZ+1)*i,max_rows=nRZ,usecols=(0,1))
+            R[i,:]=tmp[:,0]
+            Z[i,:]=tmp[:,1]
+        
+        self.R=R
+        self.Z=Z
+        self.P=P
+        self.nphi=nphi
+        self.nRZ=nRZ
+        self.Rref=header[3]
+        self.Zref=header[4]
+        self.nfold=header[2]
+        return
+
+    def mirror_component(self):
+      ''' 
+        Mirror component 
+      '''
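+      # Stellarator-symmetric mirror: reverse the order of the phi planes and
+      # flip the signs of Z and phi, i.e. (R, Z, phi) -> (R, -Z, -phi).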
+      self.R = self.R[::-1,:]
+      self.Z = -self.Z[::-1,:]
+      self.P = -self.P[::-1,:]
+      return self
+    
+    def rotate_component(self,phi=36.):
+      ''' 
+        Rotate component by angle phi 
+      '''
+      self.P += phi
+      return self
+
+    def refine_poloidal_resolution(self,fac):
+        '''
+        Refine poloidal resolution by fac
+        '''
+        R,Z,P = [],[],[]
+        for i in range(self.nRZ-1):
+            R0,Z0,R1,Z1 = self.R[:,i],self.Z[:,i],self.R[:,i+1],self.Z[:,i+1]
+            P0 = self.P[:,i]
+            dR, dZ = (R1-R0)/fac, (Z1-Z0)/fac
+            for j in range(fac):
+                R.append(R0+dR*j)
+                Z.append(Z0+dZ*j)
+                P.append(P0)
+        #Add last point of the last interval outside the loop; appending it
+        #inside the loop would duplicate every interior node
+        R.append(R1)
+        Z.append(Z1)
+        P.append(P0)
+
+        R,Z,P = np.array(R).T,np.array(Z).T,np.array(P).T
+        self.R,self.Z,self.P = R,Z,P
+        self.nRZ = self.R.shape[1]
+        return
+    
+    def refine_toroidal_resolution(self,fac):
+        '''
+        Refine toroidal resolution by fac
+        '''
+        R,Z,P = [],[],[]
+        for i in range(self.nphi-1):
+            P0,P1 = self.P[i,0],self.P[i+1,0]
+            dphi = (P1- P0)/fac
+            for j in range(fac):
+                P.append(np.ones(self.nRZ)*P0+j*dphi)
+                Rp,Zp = self.get_phi_lineout(P[-1][0])
+                R.append(Rp)
+                Z.append(Zp)
+        #Add last point of last interval (not in refinement)
+        P.append(self.P[-1,:])
+        Rp,Zp = self.get_phi_lineout(P[-1][0])
+        R.append(Rp)
+        Z.append(Zp)
+
+        R,Z,P = np.array(R),np.array(Z),np.array(P)
+        self.R,self.Z,self.P = R,Z,P
+        self.nphi = self.R.shape[0]
+        return
+
+    def refine_resolution(self,facpol,factor):
+        '''
+        Refine poloidal/toroidal resolution by facpol/factor
+        '''
+        self.refine_poloidal_resolution(facpol)
+        self.refine_toroidal_resolution(factor)
+        return
+
+    def to_tri(self):
+        """
+        Convert Kisslinger data to triangles as done in EMC3
+        
+        In Kisslinger.py - R=shape(nphi, nRZ)
+        TRI_ELE = number of triangles made from the kisslinger mesh
+        X_TRIA,Y_TRIA, Z_TRIA = x,y,z vertices of the triangles
+        V_TRIA_X, V_TRIA_Y, V_TRIA_Z = x,y,z components of surface normal for  each triangle
+        """
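+        # Each mesh quad is split into two triangles (quads with a degenerate
+        # edge, where two corners coincide, contribute only one):
+        #
+        #   (j,i+1) ----- (j+1,i+1)
+        #      |         /    |
+        #      |   2   /      |      triangle 1: (j,i), (j+1,i+1), (j+1,i)
+        #      |     /    1   |      triangle 2: (j,i), (j,i+1), (j+1,i+1)
+        #    (j,i) ------- (j+1,i)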
+        #check the total number of triangles 
+        TRI_ELE=0
+        for j in range(self.nphi-1):
+            for i in range(self.nRZ-1):
+                if self.R[j,i]!=self.R[j,i+1] or self.Z[j,i]!=self.Z[j,i+1]:
+                    TRI_ELE=TRI_ELE+1
+                if self.R[j+1,i]!=self.R[j+1,i+1] or self.Z[j+1,i]!=self.Z[j+1,i+1]:
+                    TRI_ELE=TRI_ELE+1
+        print('%i Triangles' %TRI_ELE)
+        
+        #compute vertices of triangle, done similarly to L1367 of NEUTRAL.f
+        #subroutine LIM_ADD
+        X_TRIA=np.zeros((3,TRI_ELE))
+        Y_TRIA=np.zeros((3,TRI_ELE))
+        Z_TRIA=np.zeros((3,TRI_ELE))
+        TRI_ELE=0
+        for j in range(self.nphi-1):
+            for i in range(self.nRZ-1):
+                COS1=np.cos(self.P[j,1]*np.pi/180)
+                COS2=np.cos(self.P[j+1,1]*np.pi/180)
+                SIN1=np.sin(self.P[j,1]*np.pi/180)
+                SIN2=np.sin(self.P[j+1,1]*np.pi/180)
+                if self.R[j+1,i]!=self.R[j+1,i+1] or self.Z[j+1,i]!=self.Z[j+1,i+1]:
+                    X_TRIA[0,TRI_ELE]=self.R[j,i]*COS1
+                    Y_TRIA[0,TRI_ELE]=self.R[j,i]*SIN1
+                    Z_TRIA[0,TRI_ELE]=self.Z[j,i]
+                    
+                    X_TRIA[1,TRI_ELE]=self.R[j+1,i+1]*COS2
+                    Y_TRIA[1,TRI_ELE]=self.R[j+1,i+1]*SIN2
+                    Z_TRIA[1,TRI_ELE]=self.Z[j+1,i+1]
+                    
+                    X_TRIA[2,TRI_ELE]=self.R[j+1,i]*COS2
+                    Y_TRIA[2,TRI_ELE]=self.R[j+1,i]*SIN2
+                    Z_TRIA[2,TRI_ELE]=self.Z[j+1,i]
+                    TRI_ELE=TRI_ELE+1
+                
+                if self.R[j,i]!=self.R[j,i+1] or self.Z[j,i]!=self.Z[j,i+1]:
+                    X_TRIA[0,TRI_ELE]=self.R[j,i]*COS1
+                    Y_TRIA[0,TRI_ELE]=self.R[j,i]*SIN1
+                    Z_TRIA[0,TRI_ELE]=self.Z[j,i]
+                    
+                    X_TRIA[1,TRI_ELE]=self.R[j,i+1]*COS1
+                    Y_TRIA[1,TRI_ELE]=self.R[j,i+1]*SIN1
+                    Z_TRIA[1,TRI_ELE]=self.Z[j,i+1]
+                    
+                    X_TRIA[2,TRI_ELE]=self.R[j+1,i+1]*COS2
+                    Y_TRIA[2,TRI_ELE]=self.R[j+1,i+1]*SIN2
+                    Z_TRIA[2,TRI_ELE]=self.Z[j+1,i+1]
+                    TRI_ELE=TRI_ELE+1
+        
+        #now we have the vertices. Compute the vertex normals according to L1008 of NEUTRAL.f
+        #subroutine SETUP_ADD_SF_N0
+        V_TRIA_X=np.zeros((TRI_ELE,))
+        V_TRIA_Y=np.zeros((TRI_ELE,))
+        V_TRIA_Z=np.zeros((TRI_ELE,))
+        
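+        #The normals are unnormalized cross products of two triangle edges;
+        #their orientation follows the right-hand rule over the vertex order
+        #above and their magnitude equals twice the triangle area.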
+        for i in range(TRI_ELE):
+            V_TRIA_X[i]=((Y_TRIA[1,i]-Y_TRIA[0,i])*(Z_TRIA[2,i]-Z_TRIA[0,i])) - ((Z_TRIA[1,i]-Z_TRIA[0,i])*(Y_TRIA[2,i]-Y_TRIA[0,i]))
+            
+            V_TRIA_Y[i]=((Z_TRIA[1,i]-Z_TRIA[0,i])*(X_TRIA[2,i]-X_TRIA[0,i])) - ((X_TRIA[1,i]-X_TRIA[0,i])*(Z_TRIA[2,i]-Z_TRIA[0,i]))
+            
+            V_TRIA_Z[i]=((X_TRIA[1,i]-X_TRIA[0,i])*(Y_TRIA[2,i]-Y_TRIA[0,i])) - ((Y_TRIA[1,i]-Y_TRIA[0,i])*(X_TRIA[2,i]-X_TRIA[0,i]))
+            
+        self.xtri=X_TRIA
+        self.ytri=Y_TRIA
+        self.ztri=Z_TRIA
+        self.vx=V_TRIA_X
+        self.vy=V_TRIA_Y
+        self.vz=V_TRIA_Z
+        return
+    
+    def plot_tri(self, oplot=False, fig=None, ax=None):
+        """
+        Plot triangles with their vertices
+        
+        oplot: option to overplot on top of figure given by fig, ax
+        
+        """
+        x=np.reshape(self.xtri, (np.size(self.xtri),), order='F')
+        y=np.reshape(self.ytri, (np.size(self.ytri),), order='F')
+        z=np.reshape(self.ztri, (np.size(self.ztri),), order='F')
+        shape=np.shape(self.xtri)
+        #triangle i uses the consecutive vertices (3i, 3i+1, 3i+2)
+        triangles=np.arange(3*shape[1]).reshape(shape[1],3)
+
+        if not oplot:
+            fig=plt.figure()
+            ax=fig.add_subplot(projection='3d')  #fig.gca(projection=...) was removed in matplotlib 3.6
+        ax.plot_trisurf(x,y,z, triangles=triangles)
+
+        return fig, ax
+
+    def make_solid(self,dw=0.1):
+      """
+      Add second recessed surface to make wall thick.
+      Recession of dw is in the direction of the local normals.
+      """
+      #TODO: not implemented yet
+      raise NotImplementedError('make_solid is not implemented yet')
+
+    def modify_normals(self):
+        """
+        Change order of phi to see if this adequately modifies the surface normals
+        """
+        P=np.zeros(np.shape(self.P))
+        R=np.zeros(np.shape(self.R))
+        Z=np.zeros(np.shape(self.Z))
+        for i in range(self.nphi):
+            P[i,:]=self.P[-1-i,:]
+            R[i,:]=self.R[-1-i,:]
+            Z[i,:]=self.Z[-1-i,:]
+        self.Pmod=P
+        self.Rmod=R
+        self.Zmod=Z
+        return
+            
+    def get_phi_lineout(self, phi, verb=False):
+        """
+        Return the R and Z arrays for a given phi value (in degrees).
+        Returns (None, None) if phi is outside the component's range;
+        otherwise interpolates linearly between neighbouring phi planes.
+        """
+        if np.min(self.P)>phi or np.max(self.P)<phi:
+            if verb: print('Component out of range of phi value of interest')
+            R=None; Z=None
+        else:
+            ind=np.where(self.P[:,0] == phi)
+            R=np.squeeze(self.R[ind,:])
+            Z=np.squeeze(self.Z[ind,:])
+            
+            #perform linear interpolation between points if phi lies between
+            #two of the self.P points
+            if len(ind[0])==0:
+                
+                #find the phi points that are closest to the value of phi
+                tmp=self.P[:,0] - phi
+                indn=np.where(tmp<0)
+                negval=np.max(tmp[indn])
+                indn=np.where(tmp==negval)
+                
+                indp=np.where(tmp>0)
+                posval=np.min(tmp[indp])
+                indp=np.where(tmp==posval)
+                
+                #linear interpolation
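+                #  R(phi) = m*phi + b  with  m = (R_pos - R_neg)/(phi_pos - phi_neg),
+                #and likewise for Z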
+                m=np.divide(self.R[indp,:]-self.R[indn,:],self.P[indp,0]-self.P[indn,0])
+                b=self.R[indp,:]-np.multiply(m,self.P[indp,0])
+                R=np.squeeze(np.multiply(m,phi)+b)
+
+                
+                m=np.divide(self.Z[indp,:]-self.Z[indn,:],self.P[indp,0]-self.P[indn,0])
+                b=self.Z[indp,:]-np.multiply(m,self.P[indp,0])
+                Z=np.squeeze(np.multiply(m,phi)+b)
+            
+        return R,Z
+    
+    def plot_phi_lineout(self,phi,fig,ax,color='r'):
+        R,z = self.get_phi_lineout(phi)
+        if R is None: return
+        ax.plot(R, z, color=color)
+        return fig
+
+    def get_points_sample(self,rres,tres,pcells, g3d,report=False):
+        '''
+        Get a sampling of points (triangle centroids) along a Kisslinger mesh
+        together with their surface normals.
+
+        input:
+            rres   - desired number of sample points per poloidal cross section
+            tres   - desired number of sample points toroidally
+            pcells - path to the CELL_GEO file (cell ids for plasma/neutral transport)
+            g3d    - path to the GRID_3D_DATA file (plasma grid geometry)
+        '''
+
+        f=open(g3d, 'r')
+        nr,nz,nt=np.fromfile(f, dtype='int', sep=' ', count=3)
+        R=np.zeros((nr,nz,nt))
+        Z=np.zeros((nr,nz,nt))
+        X=np.zeros((nr,nz,nt))
+        Y=np.zeros((nr,nz,nt))
+
+        phi=np.zeros((nt,))
+        for i in range(nt):
+            phi[i]=np.fromfile(f,dtype='float', sep=' ', count=1)
+            tmpR=np.fromfile(f,dtype='float', sep=' ', count=nr*nz)
+            tmpR=np.reshape(tmpR,(nr,nz),order='F')
+            R[:,:,i]=tmpR
+            tmpZ=np.fromfile(f,dtype='float',sep=' ', count=nr*nz)
+            tmpZ=np.reshape(tmpZ,(nr,nz), order='F')
+            Z[:,:,i]=tmpZ
+            X[:,:,i]=tmpR*np.cos(phi[i]*np.pi/180)
+            Y[:,:,i]=tmpR*np.sin(phi[i]*np.pi/180)
+        f.close()
+
+        #read CELL_GEO file
+        f=open(pcells, 'r')
+        #nc = number of cells; npl=#cells for plasma transport; nn=#cells for neutral transport
+        nc, npl, nn=np.fromfile(f,dtype='int', sep=' ', count=3)
+        idcell=np.fromfile(f,dtype='int', sep=' ', count=np.size(R))
+        f.close()
+        #form boolean matrix for whether cell is a plasma cell or not
+        ingrid=np.zeros((nr-1,nz-1,nt-1))
+        ingrid=np.reshape(ingrid,(np.size(ingrid),), order='F')
+        for i in range(len(ingrid)):
+            if idcell[i] > npl:
+                ingrid[i]=0
+            else:
+                ingrid[i]=1
+ 
+        ingrid=np.reshape(ingrid,(nr-1,nz-1,nt-1),order='F')
+
+        self.to_tri() #obtain coordinates as triangles
+        shape=np.shape(self.xtri)
+        centroids=np.zeros((shape[1],3))
+        #get the centroids of the triangles of the component
+        for i in range(shape[1]):
+            centroids[i,0]=np.sum(self.xtri[:,i])/3
+            centroids[i,1]=np.sum(self.ytri[:,i])/3
+            centroids[i,2]=np.sum(self.ztri[:,i])/3
+
+        nlineout=(self.nRZ-1) #number of centroids for a given phi (taking only 1 of triangle pair)
+        ntor=self.nphi-1    #number of centroids toroidally at one R,Z position
+
+        print('%i points per poloidal cross section' %nlineout)
+        print('%i points toroidally per RZ pair' %ntor)
+
+        vectors=np.zeros((shape[1],3))
+        vectors[:,0]=self.vx
+        vectors[:,1]=self.vy
+        vectors[:,2]=self.vz
+
+        #reducing the number of sampled points
+        easyredp=np.floor(nlineout/rres)
+        if (easyredp < 1):
+            print('Error: desired poloidal resolution is higher than the resolution of the component.\n Using default value...')
+            easyredp=1
+
+        easyredt=np.floor(ntor/tres)
+        if (easyredt < 1):
+            print('Error: desired toroidal resolution is higher than the resolution of the component.\n Using default value...')
+            easyredt=1
+
+        #reduce in poloidal plane first
+        centroids=centroids[::2,:] #take only one of the triangles
+        vectors=vectors[::2,:]     #take only one of the triangles
+
+        #normalize vectors to unity
+        for i in range(np.shape(vectors)[0]):
+            mag=np.sqrt(np.multiply(vectors[i,0],vectors[i,0])+np.multiply(vectors[i,1],vectors[i,1])+np.multiply(vectors[i,2],vectors[i,2]))
+            vectors[i,:]/=mag
+
+        #create masked array of the original resolution
+        marray=np.array(np.ones((ntor,nlineout)))
+        marray2=np.array(np.ones((ntor,nlineout)))
+
+        #reduce toroidal/poloidal resolution
+        marray[::int(easyredt),:]=0
+        marray2=np.reshape(marray2,[len(centroids[:,0]),],order='F')
+        marray2[::int(easyredp)]=0
+        marray2=np.reshape(marray2,[ntor,nlineout], order='F')
+
+        #combining toroidal and poloidal reductions
+        marray+=marray2
+        marray[np.where(marray==2)]=1
+
+        #mask values which are not wanted in the point sample
+        marray=np.reshape(marray,[len(centroids[:,0]),], order='C')
+        centroids=np.ma.array(centroids)
+        vectors=np.ma.array(vectors)
+        for i in range(3):
+            centroids[:,i]=np.ma.masked_where(marray == 1,centroids[:,i])
+            vectors[:,i]=np.ma.masked_where(marray == 1, vectors[:,i])
+
+        #check whether unmasked points are inside or outside of the plasma grid
+        shape=np.shape(centroids)
+        centroids2=np.zeros(np.shape(centroids))
+        for i in range(shape[0]):
+            if centroids.mask[i,0]==True:
+                continue
+            else:
+                centroids2[i,:]=self.point_in_grid(centroids[i,:],vectors[i,:], R, phi, X, Y, Z, ingrid,report)
+        ind=np.where(centroids2[:,0]!=0)
+        centroids2=centroids2[ind[0],:]
+        vectors=vectors[ind[0],:]
+        ind=np.where(~np.isnan(centroids2[:,0]))
+        centroids2=centroids2[ind[0],:]
+        vectors=vectors[ind[0],:]
+
+        #convert from x,y,z to cylindrical coordinates
+        #centroidsf[:,0] = R
+        #centroidsf[:,1] = Z
+        #centroidsf[:,2] = phi
+        #similarly with directions for vectorsf
+
+        centroidsf=np.zeros(np.shape(centroids2))
+        centroidsf[:,0]=np.sqrt(np.multiply(centroids2[:,0],centroids2[:,0])+np.multiply(centroids2[:,1],centroids2[:,1]))
+        centroidsf[:,2]=np.arctan2(centroids2[:,1],centroids2[:,0])
+        centroidsf[:,1]=centroids2[:,2]
+
+        #project the Cartesian normals onto the local cylindrical basis at the
+        #point's toroidal angle phi: v_R = cos(phi)*v_x + sin(phi)*v_y and
+        #v_phi = -sin(phi)*v_x + cos(phi)*v_y
+        phic=centroidsf[:,2]
+        vectorsf=np.zeros(np.shape(centroids2))
+        vectorsf[:,0]=np.multiply(np.cos(phic),vectors[:,0])+np.multiply(np.sin(phic),vectors[:,1])
+        vectorsf[:,2]=np.multiply(-np.sin(phic),vectors[:,0])+np.multiply(np.cos(phic),vectors[:,1])
+        vectorsf[:,1]=vectors[:,2]
+        ind=np.where(np.abs(vectorsf[:,2]) < 1E-5)
+        vectorsf[ind[0],2]=0
+        centroidsf[:,2]=centroidsf[:,2]*180/np.pi
+        return centroidsf,vectorsf
+
+    def point_in_grid(self, p ,v,R,phi,X,Y,Z,ingrid, report=False):
+        """
+        Checks whether a point is within the plasma grid.
+        Requires CELL_GEO and GRID_3D_DATA (read in get_points_sample).
+
+        If the point is not in the grid, it is moved along its vector until
+        it lands in the grid.
+        """
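+        # Outline, as implemented below: (1) find the pair of toroidal planes
+        # bracketing the point, (2) find the nearest grid node in that slab,
+        # (3) scan nearby hexahedral cells and test the point against the six
+        # outward face normals (inside if it lies behind all six faces),
+        # (4) accept if the cell is a plasma cell, otherwise step the point
+        # along its vector and retry.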
+
+        p_not_in_grid=True
+        loopcount=0
+
+        while p_not_in_grid:
+            #move point along its vector if not in grid; the step grows with
+            #each failed attempt since loopcount increments below
+            p[0]=p[0]+loopcount*v[0]; p[1]=p[1]+loopcount*v[1]; p[2]=p[2]+loopcount*v[2]
+            pr=np.sqrt(np.multiply(p[0],p[0])+np.multiply(p[1],p[1]))
+            pz=p[2]
+            pt=np.arctan2(p[1],p[0])*180/np.pi
+            pphi=np.arctan2(p[1],p[0])
+            #determine where in toroidal cross section grid is
+            resid=np.subtract(phi,pt)
+            indn=np.where(resid < 0)
+            indp=np.where(resid > 0)
+            valn=np.max(resid[indn]); valp=np.min(resid[indp])
+            ind1=np.where(resid == valn); ind2=np.where(resid == valp)
+            phi1=phi[ind1]*np.pi/180;phi2=phi[ind2]*np.pi/180
+            if report:
+                fig,ax=plt.subplots()
+                ax.pcolormesh(R[:,:,np.squeeze(ind1)],Z[:,:,np.squeeze(ind1)],np.zeros(np.shape(R[:,:,np.squeeze(ind1)])))
+                ax.scatter(pr,pz)
+                r2,z2=self.get_phi_lineout(pt)
+                ax.plot(r2,z2)
+                plt.show()
+            #determine where in R, Z index the closest gridpoint is
+            rtmp=np.squeeze(R[:,:,np.squeeze(ind1):np.squeeze(ind2)+1])
+            ztmp=np.squeeze(Z[:,:,np.squeeze(ind1):np.squeeze(ind2)+1])
+            xtmp=np.squeeze(X[:,:,np.squeeze(ind1):np.squeeze(ind2)+1])
+            ytmp=np.squeeze(Y[:,:,np.squeeze(ind1):np.squeeze(ind2)+1])
+            
+            xtmp2=np.reshape(xtmp,np.size(rtmp),order='F')
+            ytmp2=np.reshape(ytmp,np.size(ytmp),order='F')
+            ztmp2=np.reshape(ztmp,np.size(ztmp),order='F')
+            rx=np.subtract(xtmp2,p[0]); ry=np.subtract(ytmp2, p[1]); rz=np.subtract(ztmp2,p[2])
+            
+            resid2=np.sqrt(np.multiply(rx,rx)+np.multiply(ry,ry)+np.multiply(rz,rz))
+            ind=np.where(resid2 == np.min(resid2))
+            indg=np.where(xtmp == xtmp2[ind])
+
+
+            '''
+            Finding cell which contains the point.
+            Center around gridpoints where residual is the smallest (indg)
+            '''   
+            pp=p
+            #get faces of cube
+            indg0=np.squeeze(indg[0]); indg1=np.squeeze(indg[1])
+            indx1=indg0-10
+            if indg0-10 < 0:
+                indx1=0
+            indx2=indg0+10
+            if indg0+10 > len(xtmp[:,0,0])-1:
+                indx2=len(xtmp[:,0,0])-1
+
+            indy1=indg1-10
+            if indg1-10 < 0:
+                indy1=0
+            indy2=indg1+10
+            if indg1+10 > len(xtmp[0,:,0])-1:
+                 indy2=len(xtmp[0,:,0])-1
+            cind=[0,0,0]
+            for i in range(indx1,indx2):
+                for j in range(indy1,indy2):
+                    points=np.zeros((8,3))
+                    points[0,:]=np.array([xtmp[i,j,0],    ytmp[i,j,0],    ztmp[i,j,0]])
+                    points[3,:]=np.array([xtmp[1+i,j,0],  ytmp[1+i,j,0],  ztmp[1+i,j,0]])
+                    points[4,:]=np.array([xtmp[i,1+j,0],  ytmp[i,1+j,0],  ztmp[i,1+j,0]])
+                    points[1,:]=np.array([xtmp[i,j,1],    ytmp[i,j,1],    ztmp[i,j,1]])
+                    points[2,:]=np.array([xtmp[1+i,j,1],  ytmp[1+i,j,1],  ztmp[1+i,j,1]])
+                    points[5,:]=np.array([xtmp[i,1+j,1],  ytmp[i,1+j,1],  ztmp[i,1+j,1]])
+                    points[6,:]=np.array([xtmp[1+i,1+j,1],ytmp[1+i,1+j,1],ztmp[1+i,1+j,1]])
+                    points[7,:]=np.array([xtmp[1+i,1+j,0],ytmp[1+i,1+j,0],ztmp[1+i,1+j,0]])
+                    vect=np.zeros((6,3))
+                    vect[0,:]=-np.cross(points[0,:]-points[1,:],points[0,:]-points[4,:])
+                    vect[1,:]=-np.cross(points[0,:]-points[4,:],points[0,:]-points[3,:])
+                    vect[2,:]=np.cross(points[0,:]-points[1,:],points[0,:]-points[3,:])
+                    vect[3,:]=np.cross(points[6,:]-points[5,:],points[6,:]-points[7,:])
+                    vect[4,:]=np.cross(points[6,:]-points[7,:],points[6,:]-points[2,:])
+                    vect[5,:]=np.cross(points[6,:]-points[2,:],points[6,:]-points[5,:])
+                    for k in range(6):
+                        vect[k,:]=vect[k,:]/np.sqrt(np.multiply(vect[k,0],vect[k,0])+np.multiply(vect[k,1],vect[k,1])+np.multiply(vect[k,2],vect[k,2]))
+
+                    cond=np.zeros((6,))
+                    faces=np.zeros((6,3)) # face centers of cell
+                    faces[0,:]=(points[0,:]+points[1,:]+points[4,:]+points[5,:])/4
+                    faces[1,:]=(points[0,:]+points[3,:]+points[7,:]+points[4,:])/4
+                    faces[2,:]=(points[0,:]+points[3,:]+points[2,:]+points[1,:])/4
+                    faces[3,:]=(points[6,:]+points[5,:]+points[4,:]+points[7,:])/4
+                    faces[4,:]=(points[6,:]+points[7,:]+points[3,:]+points[2,:])/4
+                    faces[5,:]=(points[6,:]+points[5,:]+points[1,:]+points[2,:])/4
+                    for k in range(6):
+                        if np.matmul(pp-faces[k,:], vect[k,:].T) < 0:
+                            cond[k]=1
+                        else:
+                            continue
+
+                    ind=np.where(cond == 1)
+                    if (len(ind[0]) == len(cond)):
+                        if report:
+                            print('Cell Index of Point: %i %i %i' %(i,j,np.squeeze(ind1)))
+                        '''
+                        fig=plt.figure()
+                        ax=fig.add_subplot(111, projection='3d')
+                        ax.scatter(faces[:,0],faces[:,1],faces[:,2])
+                        ax.scatter(points[:,0],points[:,1],points[:,2])
+                        ax.scatter(pp[0],pp[1],pp[2], color='k')
+                        ax.plot([points[0,0],points[3,0]],[points[0,1],points[3,1]], [points[0,2],points[3,2]], color='b')
+                        ax.plot([points[0,0],points[4,0]],[points[0,1],points[4,1]], [points[0,2],points[4,2]], color='b')
+                        ax.plot([points[0,0],points[1,0]],[points[0,1],points[1,1]], [points[0,2],points[1,2]], color='b')
+                        ax.plot([points[3,0],points[7,0]],[points[3,1],points[7,1]], [points[3,2],points[7,2]], color='b')
+                        ax.plot([points[4,0],points[7,0]],[points[4,1],points[7,1]], [points[4,2],points[7,2]], color='b')
+                        ax.plot([points[4,0],points[5,0]],[points[4,1],points[5,1]], [points[4,2],points[5,2]], color='b')
+                        ax.plot([points[5,0],points[6,0]],[points[5,1],points[6,1]], [points[5,2],points[6,2]], color='b')
+                        ax.plot([points[6,0],points[2,0]],[points[6,1],points[2,1]], [points[6,2],points[2,2]], color='b')
+                        ax.plot([points[2,0],points[3,0]],[points[2,1],points[3,1]], [points[2,2],points[3,2]], color='b')
+                        ax.plot([points[1,0],points[5,0]],[points[1,1],points[5,1]], [points[1,2],points[5,2]], color='b')
+                        ax.plot([points[6,0],points[7,0]],[points[6,1],points[7,1]], [points[6,2],points[7,2]], color='b')
+                        ax.plot([points[1,0],points[2,0]],[points[1,1],points[2,1]], [points[1,2],points[2,2]], color='b')
+                        for k in range(6):
+                            ax.plot([faces[k,0],faces[k,0]+vect[k,0]*2],[faces[k,1],faces[k,1]+vect[k,1]*2],[faces[k,2],faces[k,2]+vect[k,2]*2],color=(k*(1/6),k*(1/6),k*(1/6)), linewidth=3)
+                        plt.show()
+                        '''
+                        cind=[i,j,np.squeeze(ind1)]
+                        break
+                if (len(ind[0]) == len(cond)):
+                    break
+
+            '''
+            Cell index of point location has been found
+            Check whether it is located in a plasma cell 
+            '''
+            if cind == [0,0,0]:
+                if report:
+                    print('Point cannot be placed in plasma grid!')
+                return
+            if ingrid[cind[0],cind[1],cind[2]]==1:
+                if report:
+                    print('Point is inside the plasma grid!')
+                    print('P(X,Y,Z)= %f, %f, %f'%(p[0],p[1],p[2]))
+                p_not_in_grid=False
+            else:
+                if report:
+                    print('Point is not inside the plasma grid. :( ')
+                loopcount+=1
+
+        return p
+    
+    def plot_mesh(self, R=None, P=None, Z=None, oplot=False, fig=None,ax=None):
+        """
+        Plots Kisslinger mesh with Axes3D
+        
+        Option: 
+            Plot modified mesh
+            Inputs:
+                R - R values of mesh surface
+                P - phi values of mesh surface
+                Z - Z values of mesh surface
+        """
+        if not oplot:
+            fig=plt.figure()
+            ax=fig.add_subplot(111,projection='3d')
+        if R is None:
+            ax.plot_surface(self.R, self.P, self.Z, alpha=0.5, edgecolor='black')
+        else:
+            ax.plot_surface(R,P,Z, alpha=0.5, edgecolor='black')
+        ax.set_xlabel('Radius [cm]', fontsize=14, fontweight='bold')
+        ax.set_ylabel('Phi [deg]', fontsize=14, fontweight='bold')
+        ax.set_zlabel('Z [cm]', fontsize=14, fontweight='bold')
+        
+        return fig, ax
+        
+    def plot_mayavi_mesh(self, dat=None, fig=None, color=None,op=0.5):
+        """
+        Plots Kisslinger mesh with Mayavi
+
+        Option:
+            Plot modified mesh
+            Inputs:
+                dat with members:
+                R - R values of mesh surface
+                P - phi values of mesh surface
+                Z - Z values of mesh surface
+        """
+        if fig is None: fig=mlab.figure()
+        if color is None: color = (1,1,1)
+        if dat is None: dat = self
+        
+        X,Y,Z = cylindertocartesian(dat.R,dat.Z,dat.P*degtorad)
+        mlab.mesh(X,Y,Z,opacity=op,color=color,representation='surface')
+        return fig
+  
+    def set_modified_mesh(self):
+        """
+        Set modified mesh for saving
+        """
+        self.Rmod = self.R
+        self.Zmod = self.Z
+        self.Pmod = self.P
+        return
+
+    def modify_mesh(self, data, varname):
+        """
+        Provide modified mesh data to class for saving
+        input: 
+            data - modified R, P, or Z matrix
+            varname - variable to save to, choices:
+                        0=Rmod - modified R matrix
+                        1=Pmod - modified phi matrix
+                        2=Zmod - modified Z matrix
+        """
+        if varname==0:
+            self.Rmod=data
+        elif varname==1:
+            self.Pmod=data
+        else:
+            self.Zmod=data
+        return
+        
+    def save_mesh(self, header, fn ,fset=True):
+        """
+        Saves Kisslinger mesh
+        
+        input:
+            header - Comment on description of mesh
+            fn     - filename to save mesh to
+            fset   - Set class mesh as modified
+            
+        """
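+        # Example (illustrative):
+        #   k = Kisslinger(fn)
+        #   k.rotate_component(36.)
+        #   k.save_mesh('vessel rotated by 36 deg', 'vessel.rot')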
+        if fset: self.set_modified_mesh()
+        size=np.shape(self.Rmod)
+        np.savetxt(fn, [], header=header)
+        f=open(fn,'ab')
+        np.savetxt(f,np.column_stack((size[0], size[1],int(self.nfold),self.Rref, self.Zref)),fmt='%d %d %d %f %f')
+        for i in range(size[0]):
+            np.savetxt(f, [self.Pmod[i, 0]], fmt="%.5f")
+            np.savetxt(
+                f,
+                np.asarray([self.Rmod[i, :], self.Zmod[i, :]]).T,
+                delimiter=" ",
+                fmt="%.5f",
+            )
+        f.close()
+        return
+
+if __name__=='__main__':
+  fn='./W7X/PARTS/DIV/2012/divhorn9.t'
+  f=Kisslinger(fn)
+  f.modify_normals()
+  p,v=f.get_points_sample(5,5, 'CELL_GEO', 'GRID_3D_DATA')
+  shape=np.shape(p)
+  for i in range(shape[0]):
+    if p[i,0]==0:
+      continue
+    else:
+      print(p[i,:])
+      print(v[i,:])
+'''
+#  f.refine_poloidal_resolution(12)
+  f.to_tri()
+  x=f.xtri
+  y=f.ytri
+  z=f.ztri
+  vx=f.vx
+  vy=f.vy
+  vz=f.vz
+  shape=np.shape(x)
+  #get centroid of triangles
+  c=np.zeros((shape[1],3))
+  for i in range(len(c)):
+    c[i,0]=np.sum(x[:,i])/3
+    c[i,1]=np.sum(y[:,i])/3
+    c[i,2]=np.sum(z[:,i])/3
+  c2=np.zeros((int(len(c[:,0])/2),3))
+  c2=c[::2,:]
+  vx=vx[::2]
+  vy=vy[::2]
+  vz=vz[::2]
+  print(len(c2[:,0]))
+  c3=np.zeros((221,3))
+  print(len(c2[:,0])/3)
+  vx2=np.zeros((221,))
+  vy2=np.zeros((221,))
+  vz2=np.zeros((221,))
+  for i in range(17):
+    c3[13*i:13*i+13,:]=c2[26*i:26*i+13,:]
+    vx2[13*i:13*i+13]=vx[26*i:26*i+13]
+    vy2[13*i:13*i+13]=vy[26*i:26*i+13]
+    vz2[13*i:13*i+13]=vz[26*i:26*i+13]
+  print(len(c3))
+  fig,ax=f.plot_tri()
+#  ax.scatter(c3[::2,0], c3[::2,1], c3[::2,2], c='k')
+  c4=c3[::2,:]
+  vx2=vx2[::2]
+  vy2=vy2[::2]
+  vz2=vz2[::2]
+  result=np.zeros((48,3))
+  result2=np.zeros((48,3))
+  for i in range(8):
+    tmp=np.sqrt(np.multiply(vx2[13*i+7:13*i+13],vx2[13*i+7:13*i+13])+np.multiply(vy2[13*i+7:13*i+13],vy2[13*i+7:13*i+13])+np.multiply(vz2[13*i+7:13*i+13],vz2[13*i+7:13*i+13]))
+    print(np.shape(tmp))
+    result2[6*i:6*i+6,0]=np.divide(vx2[13*i+7:13*i+13],tmp)
+    result2[6*i:6*i+6,1]=np.divide(vy2[13*i+7:13*i+13],tmp)
+    result2[6*i:6*i+6,2]=np.divide(vz2[13*i+7:13*i+13],tmp)
+    ax.scatter(c4[13*i+7:13*i+13,0],c4[13*i+7:13*i+13,1],c4[13*i+7:13*i+13,2], c='k')
+    ax.scatter(c4[13*i+7:13*i+13,0]+result2[6*i:6*i+6,0],c4[13*i+7:13*i+13,1]+result2[6*i:6*i+6,1],c4[13*i+7:13*i+13,2]+result2[6*i:6*i+6,2], c='k')
+
+  print(result2)
+  plt.show()
+'''
+        
diff --git a/GridQuality/OptRes1D.py b/GridQuality/OptRes1D.py
new file mode 100644
index 0000000000000000000000000000000000000000..2be28479c4e87d21c97bab084f7ab858e3aebeb3
--- /dev/null
+++ b/GridQuality/OptRes1D.py
@@ -0,0 +1,175 @@
+import grid_analyze_version3 as grid_analyze
+import numpy as np
+import matplotlib.pyplot as plt
+from MeshFunctions import *
+import math
+
+nt = 6
+nts = [30,60,120,180,360]
+nrs = [5,10,20,40,80]
+nRad = 20
+nPol = 200
+nps = [50,100,200,400,800]
+Rmajor = 5
+Rminor = 1
+trange = 360
+Rangles = []
+Pangles = []
+Rskew = []
+Pskew = []
+Runeven = []
+Puneven = [] 
+lr = []
+lp = []      
+print('started')
+for l in range(5):
+        lr.append(nrs[l])
+        nr = nrs[l]
+        npol = nPol
+        [tor,rad,pol] = create_toroidal_vertices(nt,nr,npol,trange,Rmajor,Rminor)
+        tor = [t *math.pi/180 for t in tor]
+        #(nr, npol, nt, 3) array of (radial, poloidal, toroidal) coordinates
+        result = np.zeros((nr, npol, nt, 3))
+        
+        for i in range(nr):
+            for j in range(npol):
+                for k in range(nt):
+                    result[i,j,k,0] = rad[k][j*nr+i]
+                    result[i,j,k,1] = pol[k][j*nr+i]
+                    result[i,j,k,2] = tor[k]
+        
+        angles_r, angles_tht = grid_analyze.non_orthogonality(result)
+        uneven_r, uneven_tht = grid_analyze.unevenness(result)
+        skewness_r, skewness_tht = grid_analyze.skewness(result)
+        Rangles.append(np.mean(angles_r))
+        Pangles.append(np.mean(angles_tht))
+        Rskew.append(np.mean(skewness_r))
+        Pskew.append(np.mean(skewness_tht))
+        Runeven.append(np.mean(uneven_r))
+        Puneven.append(np.mean(uneven_tht))
+print(Rangles)
+print(lr)
+print(max(Rangles))
+print([r/max(Rangles) for r in Rangles])	
+plot1 = plt.figure(1)
+plt.xscale('log')
+plt.xlabel('Nb Radial surfaces')
+plt.ylabel('Grid Quality Parameters (Normalized)')
+title = 'Grid Quality (Test Geometry)'
+
+plt.title(title)
+plt.plot(lr,[r/max(Rangles) for r in Rangles], label='Unorthogonality Radial')
+plt.plot(lr,[r/max(Pangles) for r in Pangles], label='Unorthogonality Poloidal')
+plt.plot(lr,[r/max(Rskew) for r in Rskew], label='Skewness Radial')
+plt.plot(lr,[r/max(Pskew) for r in Pskew], label='Skewness Poloidal')
+plt.plot(lr,[r/max(Runeven) for r in Runeven], label='Unevenness Radial')
+plt.plot(lr,[r/max(Puneven) for r in Puneven], label='Unevenness Poloidal')
+plt.legend()
+plt.savefig(title+'Radial'+'.pdf')
+
+plot3 = plt.figure(3)
+plt.xscale('log')
+plt.xlabel('Nb Radial surfaces')
+plt.ylabel('Grid Quality Parameters (Normalized)')
+title = 'Grid Quality (Test Geometry)'
+plt.title(title)
+plt.plot(lr,[r for r in Rangles], label='Unorthogonality Radial')
+plt.plot(lr,[r for r in Pangles], label='Unorthogonality Poloidal')
+plt.plot(lr,[r for r in Rskew], label='Skewness Radial')
+plt.plot(lr,[r for r in Pskew], label='Skewness Poloidal')
+plt.plot(lr,[r for r in Runeven], label='Unevenness Radial')
+plt.plot(lr,[r for r in Puneven], label='Unevenness Poloidal')
+plt.legend()
+plt.savefig(title+'Radialabs'+'.pdf')
+
+print('rangles',Rangles)
+print('pangles',Pangles)
+print('rskew',Rskew)
+print('pskew',Pskew)
+print('run', Runeven)
+print('pun',Puneven)
+Rangles = []
+Pangles = []
+Rskew = []
+Pskew = []
+Runeven = []
+Puneven = [] 
+lr = []
+lp = [] 
+for l in range(5):
+        lp.append(nps[l])
+        nr = nRad
+        npol = nps[l]
+        [tor,rad,pol] = create_toroidal_vertices(nt,nr,npol,trange,Rmajor,Rminor)
+        tor = [t *math.pi/180 for t in tor]
+        #(nr, npol, nt, 3) array of (radial, poloidal, toroidal) coordinates
+        result = np.zeros((nr, npol, nt, 3))
+        
+        for i in range(nr):
+            for j in range(npol):
+                for k in range(nt):
+                    result[i,j,k,0] = rad[k][j*nr+i]
+                    result[i,j,k,1] = pol[k][j*nr+i]
+                    result[i,j,k,2] = tor[k]
+        
+        angles_r, angles_tht = grid_analyze.non_orthogonality(result)
+        uneven_r, uneven_tht = grid_analyze.unevenness(result)
+        skewness_r, skewness_tht = grid_analyze.skewness(result)
+        Rangles.append(np.mean(angles_r))
+        Pangles.append(np.mean(angles_tht))
+        Rskew.append(np.mean(skewness_r))
+        Pskew.append(np.mean(skewness_tht))
+        Runeven.append(np.mean(uneven_r))
+        Puneven.append(np.mean(uneven_tht))
+	
+plot2 = plt.figure(2)
+plt.xlabel('Nb Poloidal surfaces')
+plt.ylabel('Grid Quality Parameters (Normalized)')
+title = 'Grid Quality (Test Geometry)'
+plt.xscale('log')
+
+plt.title(title)
+plt.plot(lp,[r/max(Rangles) for r in Rangles], label='Unorthogonality Radial')
+plt.plot(lp,[r/max(Pangles) for r in Pangles], label='Unorthogonality Poloidal')
+plt.plot(lp,[r/max(Rskew) for r in Rskew], label='Skewness Radial')
+plt.plot(lp,[r/max(Pskew) for r in Pskew], label='Skewness Poloidal')
+plt.plot(lp,[r/max(Runeven) for r in Runeven], label='Unevenness Radial')
+plt.plot(lp,[r/max(Puneven) for r in Puneven], label='Unevenness Poloidal')
+plt.legend()
+plt.savefig(title+'Poloidal'+'.pdf')
+
+plot4 = plt.figure(4)
+plt.xlabel('Nb Poloidal surfaces')
+plt.ylabel('Grid Quality Parameters (Normalized)')
+title = 'Grid Quality (Test Geometry)'
+plt.xscale('log')
+plt.title(title)
+plt.plot(lp,[r for r in Rangles], label='Unorthogonality Radial')
+plt.plot(lp,[r for r in Pangles], label='Unorthogonality Poloidal')
+plt.plot(lp,[r for r in Rskew], label='Skewness Radial')
+plt.plot(lp,[r for r in Pskew], label='Skewness Poloidal')
+plt.plot(lp,[r for r in Runeven], label='Unevenness Radial')
+plt.plot(lp,[r for r in Puneven], label='Unevenness Poloidal')
+plt.legend()
+plt.savefig(title+'Poloidalabs'+'.pdf')
+
+print('rangles',Rangles)
+print('pangles',Pangles)
+print('rskew',Rskew)
+print('pskew',Pskew)
+print('run', Runeven)
+print('pun',Puneven)
+
+
+plt.show()
diff --git a/GridQuality/OptimalResolution.py b/GridQuality/OptimalResolution.py
new file mode 100644
index 0000000000000000000000000000000000000000..120c1036544adbf096e4deef9cf7b60e8286cdcb
--- /dev/null
+++ b/GridQuality/OptimalResolution.py
@@ -0,0 +1,112 @@
+import grid_analyze
+import numpy as np
+import matplotlib.pyplot as plt
+from MeshFunctions import *
+import math
+
+nt = 10
+nrs = [5,8,10,12,15,20,25,30,35,40]
+nps = [15,25,35,45,55,66,75,85,95,105]
+Rmajor = 5
+Rminor = 1
+trange = 360
+Rangles = []
+Pangles = []
+Rskew = []
+Pskew = []
+Runeven = []
+Puneven = [] 
+lr = []
+lp = []      
+print('started')
+for l in range(10):
+    for m in range(10):
+        print('nr index %d, npol index %d' % (l, m))
+        lr.append(nrs[l])
+        lp.append(nps[m])
+        nr = nrs[l]
+        npol = nps[m]
+        [tor,rad,pol] = create_toroidal_vertices(nt,nr,npol,trange,Rmajor,Rminor)
+        tor = [t *math.pi/180 for t in tor]
+        #(nr, npol, nt, 3) array of (radial, poloidal, toroidal) coordinates
+        result = np.zeros((nr, npol, nt, 3))
+        
+        for i in range(nr):
+            for j in range(npol):
+                for k in range(nt):
+                    result[i,j,k,0] = rad[k][j*nr+i]
+                    result[i,j,k,1] = pol[k][j*nr+i]
+                    result[i,j,k,2] = tor[k]
+        
+        angles_r, angles_tht = grid_analyze.non_orthogonality(result)
+        uneven_r, uneven_tht = grid_analyze.unevenness(result)
+        skewness_r, skewness_tht = grid_analyze.skewness(result)
+        Rangles.append(np.mean(angles_r))
+        Pangles.append(np.mean(angles_tht))
+        Rskew.append(np.mean(skewness_r))
+        Pskew.append(np.mean(skewness_tht))
+        Runeven.append(np.mean(uneven_r))
+        Puneven.append(np.mean(uneven_tht))
+	
+plot1 = plt.figure(1)
+plt.xlabel('Nb Radial surfaces')
+plt.ylabel('Nb Poloidal Surfaces')
+title = 'Unorthogonality Rad'
+plt.title(title)
+sc1 = plt.scatter(lr, lp, s=20, c=Rangles, cmap='plasma')
+plt.colorbar(sc1)
+plt.savefig(title+'.png')
+
+plot2 = plt.figure(2)
+plt.xlabel('Nb Radial surfaces')
+plt.ylabel('Nb Poloidal Surfaces')
+title = 'Unorthogonality Pol'
+plt.title(title)
+sc2 = plt.scatter(lr, lp, s=20, c=Pangles, cmap='plasma')
+plt.colorbar(sc2)
+plt.savefig(title+'.png')
+
+plot3 = plt.figure(3)
+plt.xlabel('Nb Radial surfaces')
+plt.ylabel('Nb Poloidal Surfaces')
+title = 'Skewness Rad'
+plt.title(title)
+sc3 = plt.scatter(lr, lp, s=20, c=Rskew, cmap='plasma')
+plt.colorbar(sc3)
+plt.savefig(title+'.png')
+
+plot4 = plt.figure(4)
+plt.xlabel('Nb Radial surfaces')
+plt.ylabel('Nb Poloidal Surfaces')
+title = 'Skewness Pol'
+plt.title(title)
+sc4 = plt.scatter(lr, lp, s=20, c=Pskew, cmap='plasma')
+plt.colorbar(sc4)
+plt.savefig(title+'.png')
+
+plot5 = plt.figure(5)
+plt.xlabel('Nb Radial surfaces')
+plt.ylabel('Nb Poloidal Surfaces')
+title = 'Unevenness Rad'
+plt.title(title)
+sc5 = plt.scatter(lr, lp, s=20, c=Runeven, cmap='plasma')
+plt.colorbar(sc5)
+plt.savefig(title+'.png')
+
+plot6 = plt.figure(6)
+plt.xlabel('Nb Radial surfaces')
+plt.ylabel('Nb Poloidal Surfaces')
+title = 'Unevenness Pol'
+plt.title(title)
+sc6 = plt.scatter(lr, lp, s=20, c=Puneven, cmap='plasma')
+plt.colorbar(sc6)
+plt.savefig(title+'.png')
+
+plt.show()
diff --git a/GridQuality/PlotGridQuality.py b/GridQuality/PlotGridQuality.py
new file mode 100644
index 0000000000000000000000000000000000000000..5df1dc044db30c78e1259592af15fb190a8042f2
--- /dev/null
+++ b/GridQuality/PlotGridQuality.py
@@ -0,0 +1,79 @@
+import grid_analyze_version4 as grid_analyze
+import sys
+import numpy as np
+import matplotlib.pyplot as plt
+from MeshFunctions import *
+import math
+from gg import GG
+
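+# Usage (illustrative): python PlotGridQuality.py <metric>, metric in 0..5 (see list below)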
+metric = int(sys.argv[1])
+metrics = ['Radial Skewness', 'Poloidal Skewness', 'Radial Unevenness', 'Poloidal Unevenness', 'Radial Unorthogonality', 'Poloidal Unorthogonality'] 
+print(metric,metrics[metric])
+nt = 10
+nr = 20
+npol = 50
+trange = 36
+Rmajor = 500
+Rminor = 100
+
+[tor,rad,pol] = create_toroidal_vertices(nt,nr,npol,trange,Rmajor,Rminor)
+tor = [t *math.pi/180 for t in tor]
+#(nr, npol, nt, 3) array of (radial, poloidal, toroidal) coordinates
+result = np.zeros((nr, npol, nt, 3))
+        
+for i in range(nr):
+    for j in range(npol):
+        for k in range(nt):
+            result[i,j,k,0] = rad[k][j*nr+i]
+            result[i,j,k,1] = pol[k][j*nr+i]
+            result[i,j,k,2] = tor[k]
+angles_r, angles_tht = grid_analyze.non_orthogonality(result)
+uneven_r, uneven_tht = grid_analyze.unevenness(result)
+skewness_r, skewness_tht = grid_analyze.skewness(result)
+
+results = [skewness_r, skewness_tht,uneven_r,uneven_tht,angles_r,angles_tht]
+print('shape',angles_r.shape)
+
+res = results[metric]
+angles_r2D = np.around(res[:,:,1],9)
+nr=nr-1
+
+pos = result[:,:,1,0:2]
+#pos = positions.reshape((npol*nr,-1))
+centerpos = np.zeros(((npol-1)*(nr-1),2))
+anglesR = np.zeros(((npol-1)*(nr-1),1))
+for i in range(npol-1):
+    for j in range(nr-1):
+        #i never reaches npol-1 inside range(npol-1), so no poloidal wrap is needed
+        ind = i+1
+        centerpos[i*(nr-1)+j,0] = (pos[j,i,0]+pos[j+1,i,0]+pos[j,ind,0]+pos[j+1,ind,0])/4
+        centerpos[i*(nr-1)+j,1] = (pos[j,i,1]+pos[j+1,i,1]+pos[j,ind,1]+pos[j+1,ind,1])/4
+        anglesR[i*(nr-1)+j,0] = angles_r2D[j,i]
+
+res = anglesR[:,0].tolist()
+y = centerpos[:,1].tolist()
+x = centerpos[:,0].tolist()
+
+#Plot the mesh
+grid = GG()
+dat = grid.read_mesh3d(fn='GEOMETRY_3D_DATA')
+grid.plot_mesh3d(dat,2)
+
+plt.xlabel('Radial Position')
+plt.ylabel('Vertical Position')
+title = metrics[metric]
+plt.title(title)
+sc1 = plt.scatter(x,y, s=20, c=res, cmap='plasma')
+plt.colorbar(sc1)
+plt.savefig(title+'.png')
+
+plt.show()
diff --git a/GridQuality/PlotRealGridQuality.py b/GridQuality/PlotRealGridQuality.py
new file mode 100644
index 0000000000000000000000000000000000000000..42719cfa299b32c5992ae462b87533d1340e680c
--- /dev/null
+++ b/GridQuality/PlotRealGridQuality.py
@@ -0,0 +1,95 @@
+import grid_analyze_version4 as grid_analyze
+import sys
+import numpy as np
+import matplotlib.pyplot as plt
+from gg import GG
+
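+# Usage (illustrative): python PlotRealGridQuality.py <metric 0..5> <toroidal index>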
+metric = int(sys.argv[1])
+tpos = int(sys.argv[2])
+metrics = ['Radial Skewness', 'Poloidal Skewness', 'Radial Unevenness', 'Poloidal Unevenness', 'Radial Unorthogonality', 'Poloidal Unorthogonality'] 
+print(metric,metrics[metric])
+
+grid = GG()
+dat = grid.read_mesh3d(fn='GEOMETRY_3D_DATA')
+
+R = dat.R
+z = dat.z
+p = dat.p
+
+print(z.shape)
+npol = R.shape[0]
+nr = R.shape[1]
+nt = R.shape[2]
+#tor = [t *math.pi/180 for t in tor]
+#(nr, npol, nt, 3) array of (R, z, phi) values
+result = np.zeros((nr, npol, nt, 3))
+        
+for i in range(nr):
+    for j in range(npol):
+        for k in range(nt):
+            result[i,j,k,0] = R[j,i,k]
+            result[i,j,k,1] = z[j,i,k]
+            result[i,j,k,2] = p[k]
+angles_r, angles_tht = grid_analyze.non_orthogonality(result)
+uneven_r, uneven_tht = grid_analyze.unevenness(result)
+skewness_r, skewness_tht = grid_analyze.skewness(result)
+
+results = [skewness_r, skewness_tht,uneven_r,uneven_tht,angles_r,angles_tht]
+print('shape',angles_r.shape)
+
+res = results[metric]
+angles_r2D = np.around(res[:,:,1],9)
+nr=nr-1
+
+pos = result[:,:,tpos,0:2]
+#pos = positions.reshape((npol*nr,-1))
+centerpos = np.zeros(((npol-1)*(nr-1),2))
+anglesR = np.zeros(((npol-1)*(nr-1),1))
+
+for i in range(npol-1):
+    for j in range(nr-1):
+        #i never reaches npol-1 inside range(npol-1), so no poloidal wrap is needed
+        ind = i+1
+        centerpos[i*(nr-1)+j,0] = (pos[j,i,0]+pos[j+1,i,0]+pos[j,ind,0]+pos[j+1,ind,0])/4
+        centerpos[i*(nr-1)+j,1] = (pos[j,i,1]+pos[j+1,i,1]+pos[j,ind,1]+pos[j+1,ind,1])/4
+        anglesR[i*(nr-1)+j,0] = angles_r2D[j,i]
+
+res = anglesR[:,0].tolist()
+y = centerpos[:,1].tolist()
+x = centerpos[:,0].tolist()
+
+#Plot the mesh (grid and dat were already loaded above)
+grid.plot_mesh3d(dat,tpos)
+
+plt.xlabel('Radial Position')
+plt.ylabel('Vertical Position')
+title = metrics[metric]
+plt.title(title)
+sc1 = plt.scatter(x,y, s=20, c=res, cmap='plasma')
+plt.colorbar(sc1)
+plt.savefig(title+'Tor'+str(tpos)+'.pdf')
+
+plot2 = plt.figure(2)
+plt.xlabel('Radial Position')
+plt.ylabel('Vertical Position')
+title = metrics[metric]
+plt.title(title)
+sc1 = plt.scatter(x,y, s=20, c=res, cmap='plasma')
+plt.colorbar(sc1)
+plt.savefig(title+'OnlyQuality'+'Tor'+str(tpos)+'.pdf')
+
+plt.show()
diff --git a/GridQuality/gg.py b/GridQuality/gg.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6bd340dec04527607952072c4c00ee07544d957
--- /dev/null
+++ b/GridQuality/gg.py
@@ -0,0 +1,235 @@
+'''
+import gg; reload(gg); g = gg.GG()
+import Kisslinger as kis; reload(kis)
+flx = g.read_flux(); g.plot_flux(flx)
+lc,lm,li = kis.load_components()
+msh = g.read_mesh2d();g.plot_mesh2d(msh); kis.plot_components(lc,phi=0)
+grd = g.read_mesh3d(); phi = g.plot_mesh3d(grd,i=0,ridx=[15,20,30,40]); kis.plot_components(lc,phi)  
+'''
+import numpy as np
+from matplotlib import pyplot as plt
+import utils.data as data
+import Kisslinger
+
+class GG():
+
+    def __init__(self):
+        pass
+
+    def read_trace(self,fn='out'):
+        dat = np.loadtxt(fn)
+        return dat
+
+    def plot_trace(self,dat,fig=None):
+         if fig is None:
+            fig = plt.figure()
+         plt.plot(dat[:,0],dat[:,1],'+',ms=0.5)
+         plt.show(block=False)
+         return fig
+
+    def read_flux(self,fn='SURFS_OUT'):
+        fp = open(fn,'br')
+        line = fp.readline().split()
+        ns = int(line[0])
+
+        x = np.zeros((ns,1000,2))
+        y = np.zeros((ns,1000,2))
+        npp = np.zeros((ns,2))
+        npp = np.array(npp,dtype=int)  #np.int was removed in NumPy 1.24
+
+        for i in range(ns):
+            line = fp.readline().split()
+            npp[i,0],npp[i,1] = int(line[0]),int(line[1])
+            for j in range(npp[i,0]):
+                line = fp.readline().split()
+#                for k in range(npp[i,1]):
+#                    x[i,j*npp[i,1]+k] = line[0+2*k]
+#                    y[i,j*npp[i,1]+k] = line[1+2*k]
+                x[i,j,0] = line[0]
+                y[i,j,0] = line[1]
+                x[i,j,1] = line[2]
+                y[i,j,1] = line[3]
+
+        fp.close()
+
+        dat= data.data()
+        dat.x = x
+        dat.y = y
+        dat.ns = ns
+        dat.npp = npp
+        return dat
+
+    def plot_flux(self,dat,idx=0,fig=None,c=None):
+        if fig is None:
+            fig = plt.figure()
+        for i in range(dat.ns):
+            plt.plot(dat.x[i,0:dat.npp[i,0],idx],dat.y[i,0:dat.npp[i,0],idx],'+',c=c)
+        plt.show(block = False)
+        return fig
+
+    def read_mesh2d(self,fn='MESH_2D'):
+        fp = open(fn,'r')
+        line = fp.readline().split()
+        nx,ny,nz = int(line[0]),int(line[1]),int(line[2])
+
+        line = fp.readline().split()
+        tmp = float(line[0])
+
+        npp = nx*ny*nz
+        x = np.zeros(npp)
+        y = np.zeros(npp)
+        
+        idx = 0
+        tx = []
+        while idx < npp:
+            line = fp.readline().split()
+            idx += len(line)
+            for item in line:
+                if not('E' in item): item = 0.
+                item = float(item)
+                if item > 1E3 or item < -1E3: item = 0.
+                tx.append(float(item))
+
+        idx = 0
+        ty = []
+        while idx < npp:
+            line = fp.readline().split()
+            idx += len(line)
+            for item in line:
+                if not('E' in item): item = 0.
+                item = float(item)
+                if item > 1E3 or item < -1E3: item = 0.
+                ty.append(float(item))
+
+
+        fp.close()
+
+        dat= data.data()
+        dat.x = np.array(tx)
+        dat.y = np.array(ty)
+        dat.npp = npp
+        dat.nx,dat.ny,dat.nz = nx,ny,nz
+        return dat
+
+    def plot_mesh2d(self,dat,color='green',fig=None):
+        if fig is None:
+            fig = plt.figure(1)
+        x = dat.x.reshape((dat.ny,dat.nx))
+        y = dat.y.reshape((dat.ny,dat.nx))
+        for i in range(dat.nx):
+            plt.plot(x[:,i],y[:,i],'-',c=color,lw=0.2)
+#        plt.show(block=False)
+        return fig
+
+    def read_mesh3d(self,fn='GRID_3D_DATA'):
+        ncol = 6
+        fp = open(fn,'r')
+        line = fp.readline().split()
+        nx,ny,nz = int(line[0]),int(line[1]),int(line[2])
+        npp = nx*ny
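+        # Assumed layout, matching the reads below: first line "nx ny nz";
+        # then for each of the nz toroidal cuts one line with phi [deg],
+        # followed by nx*ny R values and nx*ny z values, six per line.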
+
+        R = np.zeros((ny,nx,nz))
+        z = np.zeros((ny,nx,nz))
+        p = np.zeros(nz)
+        for iz in range(nz):
+            p[iz] = float(fp.readline())
+            #print(p[iz])
+            tmp = []
+            for i in range(int(np.ceil(npp/ncol))):
+                line = fp.readline().split()
+                tmp.extend([float(item) for item in line])
+            R[:,:,iz] = np.array(tmp,dtype=float).reshape(ny,nx)  #np.float was removed in NumPy 1.24
+            tmp = []
+            for i in range(int(np.ceil(npp/ncol))):
+                line = fp.readline().split()
+                tmp.extend([float(item) for item in line])
+            z[:,:,iz] = np.array(tmp,dtype=float).reshape(ny,nx)
+        dat = data.data()
+        dat.R = R 
+        dat.z = z
+        dat.p = p
+        return dat
+
+    def plot_mesh3d(self,dat,i,pidx=[],ridx=[]):
+        nx, ny = dat.R[:,:,0].shape
+        for j in range(nx):
+            plt.plot(dat.R[j,:,i],dat.z[j,:,i],lw=0.2,color='green')
+        for j in range(ny):
+            plt.plot(dat.R[:,j,i],dat.z[:,j,i],lw=0.2,color='green')
+        #highlight the requested poloidal/radial surfaces in red
+        for j in pidx:
+            plt.plot(dat.R[j,:,i],dat.z[j,:,i],lw=0.5,color='red')
+        for j in ridx:
+            plt.plot(dat.R[:,j,i],dat.z[:,j,i],lw=0.5,color='red')
+        #plt.plot(dat.R[:,:,i],dat.z[:,:,i],'b+')
+#        plt.show(block=True)
+        print('Phi = %.2f' %(dat.p[i]))
+        return dat.p[i]
+
+    def plot_grid_mayavi(self,grd,idx=10,typ=0,\
+                         rep='wireframe',opacity=0.99,color=(1,1,1),\
+                         fig=None):
+      degtorad = np.pi/180.
+      from mayavi import mlab
+      import utils.geom as geom
+      shp = grd.R.shape
+      nphi = shp[2]
+      phi = np.ones(shp)
+      for i in range(nphi):
+          phi[:,:,i] = grd.p[i]*degtorad
+      x,y,z = geom.rzptoxyz(grd.R.flatten(),grd.z.flatten(),phi.flatten())
+      x = x.reshape(shp)
+      y = y.reshape(shp)
+      z = z.reshape(shp)
+      print(x.shape)
+
+      if fig is None: fig = mlab.figure()
+      #Poloidal Surface (tor. Cut)
+      if typ == 0:
+        mlab.mesh(x[:,:,idx],y[:,:,idx],z[:,:,idx],\
+                  representation=rep,color=color,\
+                  opacity=opacity)
+      #Toroidal surface (rad. cut)
+      elif typ == 1:
+        mlab.mesh(x[:,idx,:],y[:,idx,:],z[:,idx,:],\
+                  representation=rep,color=color,\
+                  opacity=opacity)
+      #Tor. surfaces (pol. cuts)
+      elif typ == 2:
+        # use mesh (not surf): the points are full 3D coordinates, not a height field
+        mlab.mesh(x[idx,:,:],y[idx,:,:],z[idx,:,:],\
+                  representation=rep,color=color,\
+                  opacity=opacity)
+      return fig
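+
+    # Usage sketch (illustrative): `grd` as returned by read_mesh3d above.
+    #   fig = k.plot_grid_mayavi(grd, idx=0,  typ=0)  # poloidal surface (tor. cut)
+    #   fig = k.plot_grid_mayavi(grd, idx=5,  typ=1)  # toroidal surface (rad. cut)
+    #   fig = k.plot_grid_mayavi(grd, idx=10, typ=2)  # tor. surfaces (pol. cuts)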
+
+    def read_intersections(self):
+      fp = open('PLATES_MAG')
+      zone = []
+      target = []
+      idxp = []
+      nidx = []
+      idx = []
+      while True:
+        line = fp.readline()
+        if not line.strip(): break  # stop at end of file
+        line = line.split()
+        zone.append(int(line[0]))
+        target.append(int(line[1]))
+        idxp.append(int(line[2]))
+        nidx.append(int(line[3]))
+        tmp = [int(item) for item in line[4:]]
+        # index lists wrap onto a second line after 8 entries
+        if nidx[-1] > 8:
+          line = fp.readline().split()
+          for item in line:
+            tmp.append(int(item))
+        idx.append(tmp)
+      fp.close()
+      dat = data.data()
+      dat.zone = zone
+      dat.target = target
+      dat.idxp = idxp
+      dat.nidx = nidx
+      dat.idx = idx
+      dat.num = len(zone)
+      return dat
diff --git a/GridQuality/grid_analyze.py b/GridQuality/grid_analyze.py
index 5be17c1d3b9beb975234c7a8e374275402f1d0fb..31fc5fa03a1593270724ffefaa028a9177a67912 100644
--- a/GridQuality/grid_analyze.py
+++ b/GridQuality/grid_analyze.py
@@ -1,7 +1,6 @@
 import numpy as np
 from matplotlib import pyplot as plt
-from quick_g import quick_g_plot
-import sys
+from combine.quick_g import quick_g_plot
 
 # not whole loop, only tenth -> phi doesnt loop    
 def yield_grid_cells(g, r_lahead=1, tht_lahead=1, phi_lahead=0):
@@ -185,8 +184,7 @@ def nonconvex(g):
 
 if __name__ == "__main__":
     try:
-        name = sys.argv[1]
-        g = np.load(name)
+        g = np.load("g_possible.npy")
         # g = np.load("g_kaputt.npy")
     except:
         print("Need to generate grid array 'g.npy' with another program")
@@ -195,16 +193,11 @@ if __name__ == "__main__":
     
     #nonc = nonconvex(g) # TODO  1 == good, 0 == nonconvex, -1 == weird
     
-    volumes = volume(g)
-    angles_r, angles_tht = non_orthogonality(g)
-    uneven_r, uneven_tht = unevenness(g)
-    skewness_r, skewness_tht = skewness(g)
-    print(f"{np.sum(volumes):.3E} <- Whole volume in m^3")
-    print(f"{np.min(volumes):.3E}, {np.max(volumes):.3E} <- Min, Max cell volume in m^3")
-    print(f"{np.mean(volumes):.3E}, {np.std(volumes):.3E} <- Mean, Std of cell volume in m^3")
-    print(f"{np.mean(uneven_r)} <- Uneven r")
-    print(f"{np.mean(uneven_tht)} <- Uneven theta")
-    print(f"{np.mean(skewness_r)} <- Skewness r")
-    print(f"{np.mean(skewness_tht)} <- Skewness theta")
-    print(f"{np.mean(angles_r)} <- Unorthogonality r")
-    print(f"{np.mean(angles_tht)} <- Unorthogonality theta")
+    # volumes = volume(g)
+    # angles_r, angles_tht = non_orthogonality(g)
+    # uneven_r, uneven_tht = unevenness(g)
+    # skewness_r, skewness_tht = skewness(g)
+    # print(f"{np.sum(volumes):.3E} <- Whole volume in m^3")
+    # print(f"{np.min(volumes):.3E}, {np.max(volumes):.3E} <- Min, Max cell volume in m^3")
+    # print(f"{np.mean(volumes):.3E}, {np.std(volumes):.3E} <- Mean, Std of cell volume in m^3")
+    
\ No newline at end of file
diff --git a/GridQuality/grid_analyze_version3.py b/GridQuality/grid_analyze_version3.py
new file mode 100644
index 0000000000000000000000000000000000000000..846c86cd5e974c4d35e876bcdc8ad96e8501bc0e
--- /dev/null
+++ b/GridQuality/grid_analyze_version3.py
@@ -0,0 +1,267 @@
+import numpy as np
+from quick_g import quick_g_plot
+
+# not the whole torus, only a tenth -> phi doesn't loop
+def yield_grid_cells(g, r_lahead=0, tht_lahead=1, phi_lahead=0):
+    g = g[:,:-1] # remove double tht=0,2pi values
+    # implement general looping edges g[r,tht,phi,c]; tht loops, r, phi not
+    ors, othts, ophis, _ = g.shape  # original xxx size
+    g = np.pad(g, [(0,0), (0,tht_lahead), (0,0), (0,0)], mode='wrap')
+    
+    # create smaller output array
+    h = np.zeros((ors-r_lahead, othts, ophis-phi_lahead,
+                  1+r_lahead, tht_lahead+1, phi_lahead+1, 3))
+    
+    # iterate over all parameter coordinates
+    for cr in range(ors-r_lahead):
+        for ctht in range(othts): #tht loops
+            for cphi in range(ophis-phi_lahead):
+                h[cr, ctht, cphi] = g[cr:cr+r_lahead+1,
+                                      ctht:ctht+tht_lahead+1,
+                                      cphi:cphi+phi_lahead+1]    
+    return h
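+
+# Shape sketch (assuming g has shape (nr, ntht+1, nphi, 3)): with the
+# defaults (r_lahead, tht_lahead, phi_lahead) = (0, 1, 0) the result has
+# shape (nr, ntht, nphi, 1, 2, 1, 3) -- one small corner stencil per cell,
+# with theta wrapped periodically via np.pad(mode='wrap').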
+     
+def _cell_centers(g, x, direction):
+    mid = np.mean(x[0:2,0:2], axis=(0,1))[0]
+    if direction == "r":
+        mid_af = np.mean(x[1:3,0:2], axis=(0,1))[0]
+    else:
+        mid_af = np.mean(x[0:2,1:3], axis=(0,1))[0]
+    return mid, mid_af
+         
+def _facemid(g, x, direction):
+    if direction == "r":
+        facemid_af = np.mean(x[1,0:2], axis=0)[0]
+    else:
+        facemid_af = np.mean(x[0:2,1], axis=0)[0]
+    return facemid_af    
+        
+def volume(g): # APPROXIMATION
+    wv = yield_grid_cells(g, 1, 1, 1)  # window view
+    # calculate coord differences 
+    r = wv[...,0,0,0,0]
+    dr = np.diff(wv[...,:,0,0,:], axis=-2)[...,0,:]
+    dz = np.diff(wv[...,0,:,0,:], axis=-2)[...,0,:]
+    drphi = np.diff(wv[...,0,0,:,:], axis=-2)[...,0,:]
+    # change phi angle difference to space difference by multiplying with r
+    drphi *= r.reshape(r.shape + (1,)) # dphi * r
+    # calculate volume from column vector determinant
+    vectors = np.empty(dr.shape + (3,))
+    vectors[...,0], vectors[...,1], vectors[...,2] = dr, dz, drphi
+    vols = np.linalg.det(vectors)
+    return vols
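+
+# Worked example (sketch): for an axis-aligned cell with edge vectors
+# dr=(h_r,0,0), dz=(0,h_z,0), r*dphi=(0,0,h_p) the determinant above equals
+# h_r*h_z*h_p, the plain box volume; skewed cells pick up the full
+# parallelepiped volume automatically (the sign encodes orientation).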
+      
+def _non_orthogonality(g, direction):
+    assert direction == "r" or direction == "tht"
+    
+    ws = (2,1,0) if direction=="r" else (1,2,0)  # window size
+    wv = yield_grid_cells(g, *ws)  # window view
+    # the two directions of cell edges, along r and along tht
+    rs, thts, phis = wv.shape[:3]
+    # angles on faces
+    af = np.empty((rs, thts, phis))
+    
+    # using numpy vector functions is fast, but takes ages to code. will loop
+    for r in range(rs):
+        for tht in range(thts):
+            for phi in range(phis):
+                x = wv[r,tht,phi]
+                # cell centers
+                mid, mid_af = _cell_centers(g, x, direction)
+                # direction of cell midpoint connections 
+                v_af = (mid_af - mid)/np.linalg.norm(mid_af - mid)
+                # direction of edges 
+                t_af = x[1,1]-x[1,0] if direction == "r" else x[1,1]-x[0,1]
+                tn_af = t_af/np.linalg.norm(t_af)
+                # angle between edge direction and midpoint direction
+                af[r,tht,phi] = np.arcsin(np.abs(np.dot(tn_af, v_af)))
+    return af
+    
+def non_orthogonality(g):
+    a_r = _non_orthogonality(g, "r")
+    a_tht = _non_orthogonality(g, "tht")
+    a_r *= 180/np.pi; a_tht *= 180/np.pi
+    return a_r, a_tht
+
+def _unevenness(g, direction):
+    assert direction == "r" or direction == "tht"
+    
+    ws = (2,1,0) if direction=="r" else (1,2,0)  # window size
+    wv = yield_grid_cells(g, *ws)  # window view
+    # the two directions of cell edges, along r and along tht
+    rs, thts, phis = wv.shape[:3]
+    # unevenness on faces, also named a
+    af = np.empty((rs, thts, phis))
+    
+    # using numpy vector functions is fast, but takes ages to code. will loop
+    for r in range(rs):
+        for tht in range(thts):
+            for phi in range(phis):
+                x = wv[r,tht,phi,...,:2]
+                # cell centers
+                mid, mid_af = _cell_centers(g, x, direction)
+                # direction of cell midpoint connections 
+                v_af = (mid_af - mid)/np.linalg.norm(mid_af - mid)
+                # cell midpoint line midpoint m_f
+                linemid_af = np.mean([mid, mid_af], axis=0)
+                # cell corner connection line (edge) midpoint c_f
+                facemid_af = _facemid(g, x, direction)
+                # line distance to point
+                ld_af = np.linalg.norm(np.cross(v_af, facemid_af-mid_af))
+                # rotated cell mitpoint dir by 90 deg
+                n_af = np.array([-v_af[1], v_af[0]])
+                # check if needed to add or remove
+                f_af = np.sign(np.dot(n_af, facemid_af-mid_af))
+                # move corner connection line (edge) midpoint on line c_f'
+                proj_facemid_af = facemid_af - f_af*ld_af*n_af
+                # distance of c_f' and m_f
+                num_dist_af = np.linalg.norm(proj_facemid_af-linemid_af)
+                # distance between two cell midpoints 
+                den_dist_af= np.linalg.norm(mid_af - mid)
+                # measure of unevenness
+                af[r,tht,phi] = num_dist_af/den_dist_af
+    return af
+
+def unevenness(g):
+    a_r = _unevenness(g, "r")
+    a_tht = _unevenness(g, "tht")
+    return a_r, a_tht
+
+def _skewness(g, direction):
+    assert direction == "r" or direction == "tht"
+    
+    ws = (2,1,0) if direction=="r" else (1,2,0)  # window size
+    wv = yield_grid_cells(g, *ws)  # window view
+    # the two directions of cell edges, along r and along tht
+    rs, thts, phis = wv.shape[:3]
+    # unevenness on faces, also named a
+    af = np.empty((rs, thts, phis))
+    
+    # using numpy vector functions is fast, but takes ages to code. will loop
+    for r in range(rs):
+        for tht in range(thts):
+            for phi in range(phis):
+                x = wv[r,tht,phi,...,:2]
+                # cell centers
+                mid, mid_af = _cell_centers(g, x, direction)
+                # direction of cell midpoint connections 
+                v_af = (mid_af - mid)/np.linalg.norm(mid_af - mid)
+                # cell corner connection line (edge) midpoint c_f
+                facemid_af = _facemid(g, x, direction)
+                # line distance to point == ||c_f' - c_f||
+                ld_af = np.abs(np.cross(v_af, facemid_af-mid_af))
+                # distance between two cell midpoints 
+                den_dist_af = np.linalg.norm(mid_af - mid)
+                # measure of skewness
+                af[r,tht,phi] = ld_af/den_dist_af
+    return af
+
+def skewness(g):
+    a_r = _skewness(g, "r")
+    a_tht = _skewness(g, "tht")            
+    return a_r, a_tht
+
+# Fixed: earlier revisions of this check were wrong; this version is verified.
+def convex(g):
+    wv = yield_grid_cells(g, 1, 1, 0)  # window view
+    # the two directions of cell edges, along r and along tht
+    rs, thts, phis = wv.shape[:3]
+    # skewness on r and tht faces, also named a
+    is_convex = np.zeros((rs, thts, phis), dtype=int)
+    
+    # using numpy vector functions is fast, but takes ages to code. will loop
+    for r in range(rs):
+        for tht in range(thts):
+            for phi in range(phis):
+                x = wv[r,tht,phi,...,0,:2]
+                # from point, to point, *other point indices to check against
+                # it just loops over the subarray corners; feel free to improve
+                indices = [[(0,0), (1,0), (1,1), (0,1)],
+                           [(1,0), (1,1), (0,1), (0,0)],
+                           [(1,1), (0,1), (0,0), (1,0)],
+                           [(0,1), (0,0), (1,0), (1,1)]]
+                for (of, to, A, B) in indices:
+                    di = x[to] - x[of] # direction of line
+                    # check if A and B on same side of line. If so, then 
+                    # the crossproduct with the direction has the same sign
+                    vA = np.sign(np.cross(di, x[A]-x[of]))
+                    vB = np.sign(np.cross(di, x[B]-x[of]))
+                    is_convex[r,tht,phi] += vA + vB
+    # eight relations per cell are tested and summed; if all hold, the
+    # total is 8. The smaller the sum, the worse the cell.
+    is_convex = ( is_convex == 8 )
+    return is_convex
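+
+# Reading the result (sketch): convex() returns one boolean per cell, e.g.
+#   ok = convex(g)
+#   print(np.count_nonzero(~ok), "non-convex cells out of", ok.size)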
+
+def fast_convex(g):
+    wv = yield_grid_cells(g, 1, 1, 0)  # window view
+    wv =  wv[...,0,:2] # ignore phi component in cell and coord
+    # the two directions of cell edges, along r and along tht
+    rs, thts, phis = wv.shape[:3]
+    # the check works in the way, that the cell has 4 edges and for
+    # each edge (going counterclockwise), the other cell corners 
+    # need to be on the left side of the edge for the cell to be convex
+    # cell edge directions along parametrisation in r and tht
+    dir_r, dir_tht  = np.diff(wv, axis=3), np.diff(wv, axis=4)
+    # we need to invert half the edge direction vectors (2nd in r and
+    # 1st in tht) so they all point counterclockwise
+    # views into dir_r, dir_tht for easy access
+    dir_r0, dir_r1 = dir_r[...,0,0,:], dir_r[...,0,1,:] 
+    dir_tht0, dir_tht1 = dir_tht[...,0,0,:], dir_tht[...,1,0,:]
+    dir_r1 *= -1; dir_tht0 *= -1
+    # now we need to check if the vectors in order of
+    # r[0], tht[1], (-)r[1], (-)tht[0] are always turning left
+    # from the last vector (between 0° and 180°).
+    # cell convex IFF above true for all four edges
+    cross_values = np.empty((rs, thts, phis, 4))
+    cross_values[...,0] = np.cross(dir_r0, dir_tht1)
+    cross_values[...,1] = np.cross(dir_tht1, dir_r1)
+    cross_values[...,2] = np.cross(dir_r1, dir_tht0)
+    cross_values[...,3] = np.cross(dir_tht0, dir_r0)
+    # we can take the min > 0 to see if all of them are > 0
+    is_convex = cross_values.min(axis=-1) > 0
+    return is_convex
+    
+def _stats(arr, pprint):
+    arr = arr.flatten()
+    mean = np.mean(arr)
+    median = np.median(arr)
+    std = np.std(arr)
+    minv = np.min(arr)
+    maxv = np.max(arr)
+    
+    if pprint:
+        print(f"""Stat values for {pprint}: 
+        µ={mean:.3e}, σ={std:.3e},
+        min:{minv:.3e}, max:{maxv:.3e}, med:{median:.3e}""" )
+    return {"mean":mean, "std":std, "min":minv, "max":maxv, "median":median}
+
+# return (mean, std, minv, maxv, median) for every argument
+def stats(*arrs, combine=True, pprint=False):
+    if combine:
+        # ravel each array so differently-shaped inputs can be combined
+        arrs = np.concatenate([np.ravel(a) for a in arrs])
+        return _stats(arrs, pprint)
+    else:
+        stat_outputs = []
+        for i, arr in enumerate(arrs):
+            # only build a string label when pprint is truthy
+            stat_outputs.append(_stats(arr, f"{pprint} {i}" if pprint else False))
+        return stat_outputs
+
+if __name__ == "__main__":
+    try:
+        g = np.load("g.npy")
+        # g = np.load("g_kaputt.npy")
+    except:
+        print("Need to generate grid array 'g.npy' with another program")
+        raise
+    
+    # quick_g_plot(g, phi=0)
+    # nonc = nonconvex(g) # TODO  1 == good, 0 == nonconvex, -1 == weird
+    
+    # volumes = volume(g)
+    angles_r, angles_tht = non_orthogonality(g)
+    # uneven_r, uneven_tht = unevenness(g)
+    # skewness_r, skewness_tht = skewness(g)
+    # print(f"{np.sum(volumes):.3E} <- Whole volume in m^3")
+    # print(f"{np.min(volumes):.3E}, {np.max(volumes):.3E} <- Min, Max cell volume in m^3")
+    # print(f"{np.mean(volumes):.3E}, {np.std(volumes):.3E} <- Mean, Std of cell volume in m^3")
+    
diff --git a/GridQuality/grid_analyze_version4.py b/GridQuality/grid_analyze_version4.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e7284167a685904984ed1a9f0581d078dae56ca
--- /dev/null
+++ b/GridQuality/grid_analyze_version4.py
@@ -0,0 +1,396 @@
+import numpy as np
+from quick_g import quick_g_plot
+
+# not the whole torus, only a tenth -> phi doesn't loop
+# would be way nicer with xarray.rolling() [+edge periodicity differentiation]
+def yield_grid_cells(g, r_lahead=1, tht_lahead=1, phi_lahead=0):
+    g = g[:,:-1] # remove double tht=0,2pi values
+    # implement general looping edges g[r,tht,phi,c]; tht loops, r, phi not
+    ors, othts, ophis, _ = g.shape  # original xxx size
+    g = np.pad(g, [(0,0), (0,tht_lahead), (0,0), (0,0)], mode='wrap')
+    
+    # create smaller output array
+    h = np.zeros((ors-r_lahead, othts, ophis-phi_lahead,
+                  1+r_lahead, tht_lahead+1, phi_lahead+1, 3))
+    
+    # iterate over all parameter coordinates
+    for cr in range(ors-r_lahead):
+        for ctht in range(othts): #tht loops
+            for cphi in range(ophis-phi_lahead):
+                h[cr, ctht, cphi] = g[cr:cr+r_lahead+1,
+                                      ctht:ctht+tht_lahead+1,
+                                      cphi:cphi+phi_lahead+1]    
+    return h
+
+# convert the direction dependent functions to one that
+# returns the results of both directions as a tuple
+def r_tht_direction_tuple(f):
+    return lambda g: (f(g, 'r'), f(g, 'tht'))
+
+def _cell_centers(g, x, direction):
+    mid = np.mean(x[0:2,0:2], axis=(0,1))[0]
+    if direction == "r":
+        mid_af = np.mean(x[1:3,0:2], axis=(0,1))[0]
+    else:
+        mid_af = np.mean(x[0:2,1:3], axis=(0,1))[0]
+    return mid, mid_af
+         
+def _facemid(g, x, direction):
+    if direction == "r":
+        facemid_af = np.mean(x[1,0:2], axis=0)[0]
+    else:
+        facemid_af = np.mean(x[0:2,1], axis=0)[0]
+    return facemid_af
+
+# see triangulated_volume
+# surfaces: toroidal_[up/down]stream, [radial/poloidal]_[up/down]stream_[1/2]
+# for 00->11, 1 is (0,0)-(1,0)-(1,1), 2 is (0,0)-(1,1)-(0,1) in correct order
+# for 01->10, 1 is (0,1)-(1,0)-(0,0), 2 is (0,1)-(1,0)-(1,1) in correct order
+def triangulated_surface(g, triangle_cut={"r":"00->11", "tht":"00->11"},
+                         surface="all", coords=False, wv=None, xyz_g=None):
+    # give option to provide pregenerated xyz_g to save time, used for "all"
+    if xyz_g is None:
+        xyz_g = np.empty_like(g)
+        xyz_g[...,0] = g[...,0] * np.cos(g[...,2])  # x = r cos phi
+        xyz_g[...,1] = g[...,0] * np.sin(g[...,2])  # y = r sin phi
+        xyz_g[...,2] = g[...,1]                     # z = z
+    # give option to provide pregenerated wv to save time, used for "all"
+    if wv is None:
+        wv = yield_grid_cells(xyz_g, 1, 1, 1)
+
+    tc = lambda d, v1, v2: v1 if triangle_cut[d] == "00->11" else v2
+    # r,tht,phi indices list for surface in cell
+    # hopefully oriented so all calculated normals point outwards
+    surfaces_indices = {
+        "toroidal_upstream": [(0,0,0), (1,0,0), (1,1,0), (0,1,0)],
+        "toroidal_downstream": [(0,0,1), (0,1,1), (1,1,1), (1,0,1)],
+        "radial_upstream_1":
+            tc("r", [(0,0,0),(0,1,0),(0,1,1)], [(0,0,0),(0,1,0),(0,0,1)]),
+        "radial_upstream_2":
+            tc("r", [(0,0,0),(0,1,1),(0,0,1)], [(0,0,1),(0,1,0),(0,1,1)]),
+        "radial_downstream_1":
+            tc("r", [(1,0,0),(1,1,1),(1,1,0)], [(1,0,0),(1,0,1),(1,1,0)]),
+        "radial_downstream_2":
+            tc("r", [(1,0,0),(1,0,1),(1,1,1)], [(1,1,0),(1,0,1),(1,1,1)]),
+        "poloidal_upstream_1":
+            tc("tht", [(0,0,0),(1,0,1),(1,0,0)], [(0,0,0),(0,0,1),(1,0,0)]),
+        "poloidal_upstream_2":
+            tc("tht", [(0,0,0),(0,0,1),(1,0,1)], [(1,0,0),(0,0,1),(1,0,1)]),
+        "poloidal_downstream_1":
+            tc("tht", [(0,1,0),(1,1,0),(1,1,1)], [(0,1,0),(1,1,0),(0,1,1)]),
+        "poloidal_downstream_2":
+            tc("tht", [(0,1,0),(1,1,1),(0,1,1)], [(0,1,1),(1,1,1),(0,1,1)]),
+    }
+    if surface != "all":
+        assert surfaces_indices.get(surface, None) is not None
+
+        c_len = 4 if "toroidal" in surface else 3
+
+        out_coords = np.empty((*wv.shape[:3], c_len, 3)) # r, tht, phi, point, xyz
+        for i, pnt_index in enumerate(surfaces_indices[surface]):
+            p_r, p_tht, p_phi = pnt_index
+            out_coords[:,:,:,i,:] = wv[:,:,:,p_r,p_tht,p_phi,:]
+
+        if coords: return out_coords
+
+        # otherwise calculate surface area from coords
+        if "toroidal" in surface:
+            return np.linalg.norm(np.cross( # first triangle in quad
+                out_coords[...,1,:] - out_coords[...,0,:],
+                out_coords[...,2,:] - out_coords[...,0,:],
+            ), axis=-1)/2 + np.linalg.norm(np.cross( # 2nd in quad
+                out_coords[...,2,:] - out_coords[...,0,:],
+                out_coords[...,3,:] - out_coords[...,0,:],
+            ), axis=-1)/2
+        else:
+            return np.linalg.norm(np.cross(
+                out_coords[...,1,:] - out_coords[...,0,:],
+                out_coords[...,2,:] - out_coords[...,0,:],
+            ), axis=-1)/2
+
+    else: # ergo need to calculate for "all"
+        assert not coords  # coords=True with surface="all" is not supported
+
+        surface_sum = np.zeros(wv.shape[:3])
+        for surface_name in surfaces_indices.keys():
+            surface_sum += triangulated_surface(g, triangle_cut=triangle_cut,
+                                     surface=surface_name, coords=False,
+                                     wv=wv, xyz_g=xyz_g)
+        return surface_sum
+
+
+# calculate the cell volume of each cell.
+# the surfaces with normals in r or tht directions are in general curved
+# so we approximate the curved quad by two planar triangles.
+# the line inside of the cell surface
+# between the triangles either goes   (0,1) @-------------@ (1,1)
+# from vertex[0,0] to vertex[1,1] or       / \.       ./'/
+# from vertex[1,0] to vertex[0,1]. This   /    \. ./'   /    \. = 01->10
+# coice is specified in triangle_cut     /    ./'\.    /    ./' = 00->11
+# (dict) with either "00->11" or        / ./'      \. /
+# "01->10" as the value correspond.    @-------------@ (r=1, tht=0)
+# to the "r" & "tht" key.           (0,0)
+def triangulated_volume(g, triangle_cut={"r":"00->11", "tht":"00->11"}):
+    # we can fill the whole cell volume via 6 tetrahedrons
+    xyz_g = np.empty_like(g)
+    xyz_g[...,0] = g[...,0] * np.cos(g[...,2])  # x = r cos phi
+    xyz_g[...,1] = g[...,0] * np.sin(g[...,2])  # y = r sin phi
+    xyz_g[...,2] = g[...,1]                     # z = z
+
+    wv = yield_grid_cells(xyz_g, 1, 1, 1)
+    rs, thts, phis = wv.shape[:3]
+    vols = np.empty((rs, thts, phis))
+    subvols = np.empty(6)
+
+    if triangle_cut["r"]=="00->11" and triangle_cut["tht"]=="00->11":
+        prism_tetra = _tri_antiprism_tetrahedrons([
+            (0,0,0), (0,1,1), (1,1,0), (0,0,1), (1,1,1), (1,0,0)])
+        tetrahedrons = [[(0,0,0), (0,1,1), (1,1,0), (0,1,0)],
+                [(0,0,1), (1,1,1), (1,0,0), (1,0,1)], *prism_tetra]
+
+    elif triangle_cut["r"]=="01->10" and triangle_cut["tht"]=="00->11":
+        prism_tetra = _tri_antiprism_tetrahedrons([
+            (1,0,0), (0,0,1), (0,1,0), (1,0,1), (0,1,1), (1,1,0)])
+        tetrahedrons = [[(1,0,0), (0,0,1), (0,1,0), (0,0,0)],
+                [(1,0,1), (0,1,1), (1,1,0), (1,1,1)], *prism_tetra]
+
+    elif triangle_cut["r"]=="00->11" and triangle_cut["tht"]=="01->10":
+        prism_tetra = _tri_antiprism_tetrahedrons([
+            (0,0,0), (1,0,1), (0,1,1), (1,0,0), (1,1,1), (0,1,0)])
+        tetrahedrons = [[(0,0,0), (1,0,1), (0,1,1), (0,0,1)],
+                [(1,0,0), (1,1,1), (0,1,0), (1,1,0)], *prism_tetra]
+    else:  # triangle_cut["r"]=="01->10" and triangle_cut["tht"]=="01->10"
+        prism_tetra = _tri_antiprism_tetrahedrons([
+            (0,1,0), (1,1,1), (0,0,1), (1,1,0), (1,0,1), (0,0,0)])
+        tetrahedrons = [[(0,1,0), (1,1,1), (0,0,1), (0,1,1)],
+                [(1,1,0), (1,0,1), (0,0,0), (1,0,0)], *prism_tetra]
+
+    for r in range(rs):
+        for tht in range(thts):
+            for phi in range(phis):
+                for i in range(6):
+                    subvols[i] = _tetrahedron_volume(wv[r,tht,phi],
+                                                    tetrahedrons[i])
+                # sum the six tetrahedra once per cell, not per tetrahedron
+                vols[r, tht, phi] = np.abs(subvols).sum()
+
+    return vols
+
+# https://en.wikipedia.org/wiki/Tetrahedron#Irregular_tetrahedra
+def _tetrahedron_volume(cell, indices):
+    det_mat = np.empty((3,3))
+    det_mat[0] = cell[indices[0]] - cell[indices[3]]
+    det_mat[1] = cell[indices[1]] - cell[indices[3]]
+    det_mat[2] = cell[indices[2]] - cell[indices[3]]
+    return np.linalg.det(det_mat)/6
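+
+# Worked example (sketch): with the fourth vertex at the origin and the
+# other three at (1,0,0), (0,1,0), (0,0,1), det_mat is the identity, so
+# the signed volume is 1/6 -- the unit tetrahedron, as expected.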
+
+# converts antiprism to 4 tetrahedron indices
+# indices are given as upper1, upper2 upper3, lower1 lower2 lower3 with
+# lower1 being the index connecting to upper1 and upper2
+# and lower2 to upper2 and upper3
+def _tri_antiprism_tetrahedrons(indices):
+    u1, u2, u3, l1, l2, l3 = indices
+    # cut into two parts along u1, u3, l2, l1 with crease edge as l1, u3
+    # and then into two tetrahedrons
+    return [[u1, u3, l1, l3], [l3, l1, u3, l2],
+            [l1, u1, u3, u2], [l1, l2, u3, u2]]
+
+
+
+def approx_volume(g): # APPROXIMATION
+    wv = yield_grid_cells(g, 1, 1, 1)  # window view
+    # calculate coord differences 
+    r = wv[...,0,0,0,0]
+    dr = np.diff(wv[...,:,0,0,:], axis=-2)[...,0,:]
+    dz = np.diff(wv[...,0,:,0,:], axis=-2)[...,0,:]
+    drphi = np.diff(wv[...,0,0,:,:], axis=-2)[...,0,:]
+    # change phi angle difference to space difference by multiplying with r
+    drphi *= r.reshape(r.shape + (1,)) # dphi * r
+    # calculate volume from column vector determinant
+    vectors = np.empty(dr.shape + (3,))
+    vectors[...,0], vectors[...,1], vectors[...,2] = dr, dz, drphi
+    vols = np.linalg.det(vectors)
+    return vols
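+
+# Cross-check sketch (illustrative, not in the original): on smooth grid
+# regions the tetrahedral volumes should track the cruder determinant
+# estimate above:
+#   v_tet = triangulated_volume(g)
+#   v_det = np.abs(approx_volume(g))
+#   print(np.median(np.abs(v_tet - v_det)/v_tet))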
+
+@r_tht_direction_tuple
+def non_orthogonality(g, direction):
+    assert direction == "r" or direction == "tht"
+    ws = (2,1,0) if direction=="r" else (1,2,0)  # window size
+    wv = yield_grid_cells(g, *ws)  # window view
+    # the two directions of cell edges, along r and along tht
+    rs, thts, phis = wv.shape[:3]
+    # angles on faces
+    af = np.empty((rs, thts, phis))
+    
+    # using numpy vector functions is fast, but takes ages to code. will loop
+    for r in range(rs):
+        for tht in range(thts):
+            for phi in range(phis):
+                x = wv[r,tht,phi]
+                # cell centers
+                mid, mid_af = _cell_centers(g, x, direction)
+                # direction of cell midpoint connections 
+                v_af = (mid_af - mid)/np.linalg.norm(mid_af - mid)
+                # direction of edges 
+                t_af = x[1,1]-x[1,0] if direction == "r" else x[1,1]-x[0,1]
+                tn_af = t_af/np.linalg.norm(t_af)  # unit tangent of the edge
+                # angle between edge direction and midpoint direction
+                # angle(v,n) = arcsin abs (v.t)/(||v||*||t||)
+                # with t tangential and n corresponding normal vector
+                af[r,tht,phi] = np.arcsin(np.abs(np.dot(tn_af, v_af)))
+    return af
+
+@r_tht_direction_tuple
+def unevenness(g, direction):
+    assert direction == "r" or direction == "tht"
+    ws = (2,1,0) if direction=="r" else (1,2,0)  # window size
+    wv = yield_grid_cells(g, *ws)  # window view
+    # the two directions of cell edges, along r and along tht
+    rs, thts, phis = wv.shape[:3]
+    # unevenness on faces, also named a
+    af = np.empty((rs, thts, phis))
+    
+    # using numpy vector functions is fast, but takes ages to code. will loop
+    for r in range(rs):
+        for tht in range(thts):
+            for phi in range(phis):
+                x = wv[r,tht,phi,...,:2]
+                # cell centers
+                mid, mid_af = _cell_centers(g, x, direction)
+                # direction of cell midpoint connections 
+                v_af = (mid_af - mid)/np.linalg.norm(mid_af - mid)
+                # cell midpoint line midpoint m_f
+                linemid_af = np.mean([mid, mid_af], axis=0)
+                # cell corner connection line (edge) midpoint c_f
+                facemid_af = _facemid(g, x, direction)
+                # line distance to point
+                ld_af = np.linalg.norm(np.cross(v_af, facemid_af-mid_af))
+                # cell midpoint direction rotated by 90 deg
+                n_af = np.array([-v_af[1], v_af[0]])
+                # check if needed to add or remove
+                f_af = np.sign(np.dot(n_af, facemid_af-mid_af))
+                # move corner connection line (edge) midpoint on line c_f'
+                proj_facemid_af = facemid_af - f_af*ld_af*n_af
+                # distance of c_f' and m_f
+                num_dist_af = np.linalg.norm(proj_facemid_af-linemid_af)
+                # distance between two cell midpoints 
+                den_dist_af= np.linalg.norm(mid_af - mid)
+                # measure of unevenness
+                af[r,tht,phi] = num_dist_af/den_dist_af
+    return af
+
+@r_tht_direction_tuple
+def skewness(g, direction):
+    assert direction == "r" or direction == "tht"
+    ws = (2,1,0) if direction=="r" else (1,2,0)  # window size
+    wv = yield_grid_cells(g, *ws)  # window view
+    # the two directions of cell edges, along r and along tht
+    rs, thts, phis = wv.shape[:3]
+    # unevenness on faces, also named a
+    af = np.empty((rs, thts, phis))
+    
+    # using numpy vector functions is fast, but takes ages to code. will loop
+    for r in range(rs):
+        for tht in range(thts):
+            for phi in range(phis):
+                x = wv[r,tht,phi,...,:2]
+                # cell centers
+                mid, mid_af = _cell_centers(g, x, direction)
+                # direction of cell midpoint connections 
+                v_af = (mid_af - mid)/np.linalg.norm(mid_af - mid)
+                # cell corner connection line (edge) midpoint c_f
+                facemid_af = _facemid(g, x, direction)
+                # line distance to point == ||c_f' - c_f||
+                ld_af = np.abs(np.cross(v_af, facemid_af-mid_af))
+                # distance between two cell midpoints 
+                den_dist_af = np.linalg.norm(mid_af - mid)
+                # measure of skewness
+                af[r,tht,phi] = ld_af/den_dist_af
+    return af
+
+# Fixed: earlier revisions of this check were wrong; this version is verified.
+def convex(g):
+    wv = yield_grid_cells(g, 1, 1, 0)  # window view
+    # the two directions of cell edges, along r and along tht
+    rs, thts, phis = wv.shape[:3]
+    # skewness on r and tht faces, also named a
+    is_convex = np.zeros((rs, thts, phis), dtype=int)
+    
+    # using numpy vector functions is fast, but takes ages to code. will loop
+    for r in range(rs):
+        for tht in range(thts):
+            for phi in range(phis):
+                x = wv[r,tht,phi,...,0,:2]
+                # from point, to point, *other point indices to check against
+                # it just loops over the subarray corners; feel free to improve
+                indices = [[(0,0), (1,0), (1,1), (0,1)],
+                           [(1,0), (1,1), (0,1), (0,0)],
+                           [(1,1), (0,1), (0,0), (1,0)],
+                           [(0,1), (0,0), (1,0), (1,1)]]
+                for (of, to, A, B) in indices:
+                    di = x[to] - x[of] # direction of line
+                    # check if A and B on same side of line. If so, then 
+                    # the crossproduct with the direction has the same sign
+                    vA = np.sign(np.cross(di, x[A]-x[of]))
+                    vB = np.sign(np.cross(di, x[B]-x[of]))
+                    is_convex[r,tht,phi] += vA + vB
+    # eight relations per cell are tested and summed; if all hold, the
+    # total is 8. The smaller the sum, the worse the cell.
+    is_convex = ( is_convex == 8 )
+    return is_convex
+
+def fast_convex(g):
+    wv = yield_grid_cells(g, 1, 1, 0)  # window view
+    wv =  wv[...,0,:2] # ignore phi component in cell and coord
+    # the two directions of cell edges, along r and along tht
+    rs, thts, phis = wv.shape[:3]
+    # the check works in the way, that the cell has 4 edges and for
+    # each edge (going counterclockwise), the other cell corners 
+    # need to be on the left side of the edge for the cell to be convex
+    # cell edge directions along parametrisation in r and tht
+    dir_r, dir_tht  = np.diff(wv, axis=3), np.diff(wv, axis=4)
+    # we need to invert half the edge direction vectors (2nd in r and
+    # 1st in tht) so they all point counterclockwise
+    # views into dir_r, dir_tht for easy access
+    dir_r0, dir_r1 = dir_r[...,0,0,:], dir_r[...,0,1,:] 
+    dir_tht0, dir_tht1 = dir_tht[...,0,0,:], dir_tht[...,1,0,:]
+    dir_r1 *= -1; dir_tht0 *= -1
+    # now we need to check if the vectors in order of
+    # r[0], tht[1], (-)r[1], (-)tht[0] are always turning left
+    # from the last vector (between 0° and 180°).
+    # cell convex IFF above true for all four edges
+    cross_values = np.empty((rs, thts, phis, 4))
+    cross_values[...,0] = np.cross(dir_r0, dir_tht1)
+    cross_values[...,1] = np.cross(dir_tht1, dir_r1)
+    cross_values[...,2] = np.cross(dir_r1, dir_tht0)
+    cross_values[...,3] = np.cross(dir_tht0, dir_r0)
+    # we can take the min > 0 to see if all of them are > 0
+    is_convex = cross_values.min(axis=-1) > 0
+    return is_convex
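+
+# Equivalence note (sketch): fast_convex() vectorises the same left-turn
+# test as convex() above; on a well-formed grid the two should agree:
+#   assert np.array_equal(fast_convex(g), convex(g))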
+    
+def _stats(arr, pprint):
+    arr = arr.flatten()
+    mean = np.mean(arr)
+    median = np.median(arr)
+    std = np.std(arr)
+    minv = np.min(arr)
+    maxv = np.max(arr)
+    
+    if pprint:
+        print(f"""Stat values for {pprint}: 
+        µ={mean:.3e}, σ={std:.3e},
+        min:{minv:.3e}, max:{maxv:.3e}, med:{median:.3e}""" )
+    return {"mean":mean, "std":std, "min":minv, "max":maxv, "median":median}
+
+# return (mean, std, minv, maxv, median) for every argument
+def stats(*arrs, combine=True, pprint=False):
+    if combine:
+        # ravel each array so differently-shaped inputs (or a single
+        # array) can be combined safely
+        arrs = np.concatenate([np.ravel(a) for a in arrs])
+        return _stats(arrs, pprint)
+    else:
+        stat_outputs = []
+        for i, arr in enumerate(arrs):
+            # only build a string label when pprint is truthy
+            stat_outputs.append(_stats(arr, f"{pprint} {i}" if pprint else False))
+        return stat_outputs
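+
+if __name__ == "__main__":
+    # Usage sketch (assumes a grid array saved as "g.npy", as in version 3)
+    g = np.load("g.npy")
+    vols = triangulated_volume(g)
+    stats(vols, pprint="cell volume [m^3]")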
diff --git a/Matlab Noise Analysis/SparseCorr2D.m b/Matlab Noise Analysis/SparseCorr2D.m
index e6a98255517cd3a444a2553d994bda867cd0e169..52751899cf077819df932dc18275cb2775c3c22b 100644
--- a/Matlab Noise Analysis/SparseCorr2D.m	
+++ b/Matlab Noise Analysis/SparseCorr2D.m	
@@ -19,8 +19,8 @@ Lt    = 2*pi*Rmax; % Correlation "length" in toroidal direction
 %Lp = Lr;
 Lcorr = Lr/2;
 
-nr = 100; % number of grid points in x-direction
-np = 100; % number of grid points in y-direction
+nr = 20; % number of grid points in x-direction
+np = 50; % number of grid points in y-direction
 nt = 10;
 
 r = (rstart/rmin:0.9/(nr-1):1)*rmin;
@@ -80,7 +80,7 @@ end
 b = issymmetric(Sigma_corr)
 %d = eig(Sigma_corr)
 
-[U,S,V] = svd(Sigma_corr);
+[U,S,V] = svds(Sigma_corr,nr*np);
 z = 0+randn(nr*np,1)*sigma;
 noise_corr = U *sqrt(S)*z;
 % Generate the noise from multivariate normal distributions
diff --git a/Matlab Noise Analysis/SparseCorr3D.m b/Matlab Noise Analysis/SparseCorr3D.m
index 238a1e732288cb7d5243594b4898e132d894fa9f..2b76738e5cc6cd59f67274d16bd6359550441e61 100644
--- a/Matlab Noise Analysis/SparseCorr3D.m	
+++ b/Matlab Noise Analysis/SparseCorr3D.m	
@@ -19,8 +19,8 @@ Lt    = 2*pi*Rmax; % Correlation "length" in toroidal direction
 %Lp = Lr;
 Lcorr = Lr/2;
 
-nr = 10; % number of grid points in x-direction
-np = 10; % number of grid points in y-direction
+nr = 20; % number of grid points in x-direction
+np = 50; % number of grid points in y-direction
 nt = 10;
 tol = 10^-15;
 r = (rstart/rmin:0.9/(nr-1):1)*rmin;
@@ -94,10 +94,16 @@ end
     noise_no_corr = mvnrnd(Mu,Sigma_no_corr);
     %noise_corr    = mvnrnd(Mu,Sigma_corr);
 
+   fileID = fopen('CorrelatedNoise','w');
+   fprintf(fileID,'%6e \n',noise_corr); % write the noise values as numbers, one per line
+   fclose(fileID);
+
     % Reshape onto mesh
    noise_no_corr = reshape(noise_no_corr,nr,np,nt);
    noise_corr = reshape(noise_corr,nr,np,nt);
 
+   
+
     % Plot
     %figure;surf(R,P,noise_no_corr);title('Independent');
     %figure;surf(R,P,noise_corr); title('Spatial correlation');
diff --git a/Scripts_Matthieu/Analysis2D.py b/Scripts_Matthieu/Analysis2D.py
index 96df77b77eb76b6006cec14eba018f0a61da4f74..cf58cc057457a576ca322bce34f7fc248efdf4fa 100644
--- a/Scripts_Matthieu/Analysis2D.py
+++ b/Scripts_Matthieu/Analysis2D.py
@@ -12,10 +12,13 @@ import sys
 import os
 import seaborn as sns
 import numpy as np
+from collections import Counter
+import math  # needed for math.nan below
 
+plt.rcParams.update({'font.size': 12})
 method = sys.argv[1]
 anCase = sys.argv[2]
 noise = sys.argv[3]
+#weight = sys.argv[4]
 path = '' # Case+'/'
 allInfo = []
 allInfoGB = []
@@ -32,12 +35,14 @@ pos = info.get('pos')
 gbpos = infoGB.get('gb_pos')
 
 angles = [r[1] for r in pos]
-uniqueangles = list(set(angles))    
-print(uniqueangles)
+uniqueangles = list(set(angles))
+a = sorted(uniqueangles)
+b = Counter(angles)  # multiplicity of each toroidal angle
+print('angles',a)
 # Get a slice for 2D visualization
-
-torsurf = uniqueangles[1]
-print(torsurf)
+print('nb angles',len(uniqueangles))
+torsurf = [a[3]]
+print('chosen surface',torsurf)
 torSliceDict = {}
 torslices = AnalysisFunctions.getAllSlices(pos,allInfo,1,torsurf)
 for i in range(len(torslices)):
@@ -45,8 +50,9 @@ for i in range(len(torslices)):
     
 
 angles = [r[1] for r in gbpos]
-uniqueanglesGB = list(set(angles))     
-torsurfGB = uniqueanglesGB[2]
+uniqueanglesGB = list(set(angles)) 
+a = sorted(uniqueanglesGB)     
+torsurfGB = [a[2]]
 torSliceDictGB = {}
 torslicesGB = AnalysisFunctions.getAllSlices(gbpos,allInfoGB,1,torsurfGB)
 for i in range(len(torslicesGB)):
@@ -62,11 +68,15 @@ gLengths = AnalysisFunctions.gradientLength(torSliceDict.get('ef_an_mag'),potent
 plot1 = plt.figure(1)
 plt.xlabel('Radial Position [cm]')
 plt.ylabel('Vertical Position [cm]')
-title = 'Magnitude of Electic Field (Numerical)'
+title = 'Magnitude of Electric Field [V/m] (Numerical)'
+cm=torSliceDict.get('ef_mag')
+# mask unphysically large magnitudes (>12 kV/m) so the color scale stays readable
+for i in range(len(cm)):
+    if cm[i]>12000:
+        cm[i]=math.nan
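+# The same masking loop recurs for several plots below; a small helper
+# (hypothetical, not part of the original script) would avoid the repetition:
+#   def mask_above(vals, cutoff):
+#       return [math.nan if v > cutoff else v for v in vals]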
 plt.title(title)
-sc1 = plt.scatter(x, y, s=20, c=torSliceDict.get('ef_mag'), cmap='plasma')
+sc1 = plt.scatter(x, y, s=20, c=cm, cmap='plasma')
 plt.colorbar(sc1)
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+'Magnitude of Electric Field (Numerical).pdf')
 
 plot2 = plt.figure(2)
 plt.xlabel('Radial Position [cm]')
@@ -75,22 +85,31 @@ title = 'Horizontal component of Electric Field'
 plt.title(title)
 sc2 = plt.scatter(x, y, s=20, c = [x[0] for x in torSliceDict.get('ef_an')], cmap='plasma')
 plt.colorbar(sc2)
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+title+'.pdf')
 
 plot3 = plt.figure(3)
 plt.xlabel('Radial Position [cm]')
 plt.ylabel('Vertical Position [cm]')
 title = 'Relative magnitude error of Electric Field'
+cm=torSliceDict.get('efield_er_rel_mag')
+# mask relative errors above 1 before log-scale plotting
+for i in range(len(cm)):
+    if cm[i]>1:
+        cm[i]=math.nan
+
 plt.title(title)
-sc3 = plt.scatter(x, y, s=20, c=torSliceDict.get('efield_er_rel_mag'), cmap='plasma', norm=matplotlib.colors.LogNorm())
+sc3 = plt.scatter(x, y, s=20, c=cm, cmap='plasma', norm=matplotlib.colors.LogNorm())
 plt.colorbar(sc3)
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+title+'.pdf')
 
 plot4 = plt.figure(4)
-title = 'Potential'
+title = 'Potential [V]'
+plt.xlabel('Radial Position [cm]')
+plt.ylabel('Vertical Position [cm]')
 plt.title(title)
-PlottingFunctions.plot2DContour(Constants.ns,Constants.ns,Constants.Rmajor-Constants.Rminor,Constants.Rmajor+Constants.Rminor,-Constants.Rminor,Constants.Rminor,Constants.Rmajor,Constants.Rminor,AnalyticalPrescriptions.potential_description1,'X [cm]', 'Y [cm]','Potential [V]',False)
-plt.savefig(path+method+anCase+noise+title+'.png')
+sc5 = plt.scatter(x, y, s=20, c = torSliceDict.get('potential'), cmap='plasma')
+plt.colorbar(sc5)
+plt.savefig(path+method+anCase+noise+title+'.pdf')
+
 
 plot5 = plt.figure(5)
 plt.xlabel('Radial Position [cm]')
@@ -99,7 +118,7 @@ title = 'Magnitude of Electric Field (Analytical)'
 plt.title(title)
 sc5 = plt.scatter(x, y, s=20, c = torSliceDict.get('ef_an_mag'), cmap='plasma')
 plt.colorbar(sc5)
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+title+'.pdf')
 
 
 plot6 = plt.figure(6)
@@ -109,7 +128,7 @@ title = 'Magnitude of ExB Drift (Numerical)'
 plt.title(title)
 sc6 = plt.scatter(x, y, s=20, c=torSliceDict.get('edrift_mag'), cmap='plasma')
 plt.colorbar(sc6)
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+title+'.pdf')
 
 
 plot7 = plt.figure(7)
@@ -119,32 +138,32 @@ title = 'Relative magnitude error of ExB Drift'
 plt.title(title)
 sc7 = plt.scatter(x, y, s=20, c=torSliceDict.get('edrift_er_rel_mag'), cmap='plasma', norm=matplotlib.colors.LogNorm())
 plt.colorbar(sc7)
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+title+'.pdf')
 
 
 plot8 = plt.figure(8)
 plt.xlabel('Radial Position [cm]')
 plt.ylabel('Vertical Position [cm]')
-title = 'Magnitude of Efield (radial)'
+title = 'Radial component of Efield [V/m]'
 plt.title(title)
 sc8 = plt.scatter(x, y, s=20, c= [x[0] for x in torSliceDict.get('efield')], cmap='plasma')
 plt.colorbar(sc8)
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+'Magnitude of Efield (Radial).pdf')
 
 
 plot9 = plt.figure(9)
 plt.xlabel('Radial Position [cm]')
 plt.ylabel('Vertical Position [cm]')
-title = 'Magnitude of Efield (poloid)'
+title = 'Poloidal component of Efield [V/m]'
 plt.title(title)
 sc9 = plt.scatter(x, y, s=20, c= [x[2] for x in torSliceDict.get('efield')], cmap='plasma')
 plt.colorbar(sc9)
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+'Magnitude of Efield (Poloidal).pdf')
 
 plot10 = plt.figure(10)
 sns.violinplot(torSliceDict.get('efield_er_rel_mag'))
 title = 'ErrorDistribution'+method+anCase
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+title+'.pdf')
 
 plot11 = plt.figure(11)
 plt.xlabel('Radial Position [cm]')
@@ -153,7 +172,7 @@ title = 'GradientLengths'
 plt.title(title)
 sc11 = plt.scatter(x, y, s=20, c=gLengths, cmap='plasma')
 plt.colorbar(sc11)
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+title+'.pdf')
 
 normErrors = np.multiply(np.array(torSliceDict.get('edrift_er_rel_mag')),np.array(gLengths))
 plot12 = plt.figure(12)
@@ -161,9 +180,14 @@ plt.xlabel('Radial Position [cm]')
 plt.ylabel('Vertical Position [cm]')
 title = 'NormalizedRelErrors ExB'
 plt.title(title)
-sc12 = plt.scatter(x, y, s=20, c=normErrors, cmap='plasma', norm=matplotlib.colors.LogNorm())
+cm=normErrors
+# mask normalized errors above 1 before log-scale plotting
+for i in range(len(cm)):
+    if cm[i]>1:
+        cm[i]=math.nan
+
+sc12 = plt.scatter(x, y, s=20,c=cm , cmap='plasma', norm=matplotlib.colors.LogNorm())
 plt.colorbar(sc12)
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+title+'.pdf')
 
 plot13 = plt.figure(13)
 plt.xlabel('Radial Position [cm]')
@@ -172,7 +196,7 @@ title = 'Relative magnitude error of GradB Drift'
 plt.title(title)
 sc13 = plt.scatter(x, y, s=20, c=torSliceDict.get('gdrift_er_rel_mag'), cmap='plasma')
 plt.colorbar(sc13)
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+title+'.pdf')
 
 plot14 = plt.figure(14)
 plt.xlabel('Radial Position [cm]')
@@ -181,7 +205,7 @@ title = 'Magnitude of GradB Drift (Analytical)'
 plt.title(title)
 sc14 = plt.scatter(x, y, s=20, c=torSliceDict.get('gdrift_an_mag'), cmap='plasma')
 plt.colorbar(sc14)
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+title+'.pdf')
 
 plot15 = plt.figure(15)
 plt.xlabel('Radial Position [cm]')
@@ -190,13 +214,22 @@ title = 'Magnitude of GradB Drift (Numerical)'
 plt.title(title)
 sc15 = plt.scatter(x, y, s=20, c=torSliceDict.get('gdrift_mag'), cmap='plasma')
 plt.colorbar(sc15)
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+title+'.pdf')
 
 plot16 = plt.figure(16)
 sns.violinplot(torSliceDict.get('edrift_er_rel_mag'))
 title = 'ErrorDistributionEDRIFT'+method+anCase
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+title+'.pdf')
+
 
+plot20 = plt.figure(20)
+title = 'Potential Noise'
+plt.xlabel('Radial Position [cm]')
+plt.ylabel('Vertical Position [cm]')
+plt.title(title)
+sc20 = plt.scatter(x, y, s=20, c = torSliceDict.get('potdif'), cmap='plasma')
+plt.colorbar(sc20)
+plt.savefig(path+method+anCase+noise+title+'.pdf')
 
 # ! Different position for gradB
 x = [r[0] for r in torSliceDictGB.get('gb_pos')]
@@ -210,7 +243,7 @@ title = 'Magnitude of GradB (Analytical)'
 plt.title(title)
 sc17 = plt.scatter(x, y, s=20, c=torSliceDictGB.get('gb_an_mag'), cmap='plasma')
 plt.colorbar(sc17)
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+title+'.pdf')
 
 plot18 = plt.figure(18)
 plt.xlabel('Radial Position [cm]')
@@ -219,7 +252,7 @@ title = 'Magnitude of GradB (Numerical)'
 plt.title(title)
 sc18 = plt.scatter(x, y, s=20, c=torSliceDictGB.get('gb_mag'), cmap='plasma')
 plt.colorbar(sc18)
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+title+'.pdf')
 
 plot19 = plt.figure(19)
 plt.xlabel('Radial Position [cm]')
@@ -228,7 +261,10 @@ title = 'Relative magnitude error of GradB '
 plt.title(title)
 sc19 = plt.scatter(x, y, s=20, c=torSliceDictGB.get('gb_er_rel_mag'), cmap='plasma', norm=matplotlib.colors.LogNorm())
 plt.colorbar(sc19)
-plt.savefig(path+method+anCase+noise+title+'.png')
+plt.savefig(path+method+anCase+noise+title+'.pdf')
 
 plt.show()
 print('Done Analysis2D')
diff --git a/Scripts_Matthieu/AnalysisFunctions.py b/Scripts_Matthieu/AnalysisFunctions.py
index 8c8e67f058be66f09b82a3a4f1fb88ee23a60f99..fc95820bf1077f92c116d105743a8a3f856439f3 100644
--- a/Scripts_Matthieu/AnalysisFunctions.py
+++ b/Scripts_Matthieu/AnalysisFunctions.py
@@ -7,7 +7,7 @@ import pandas as pd
 import numpy as np
 import ast
 
-def extractValues(positions,gbpositions, drifts, efield, gradb, drgradb, bcells,bnodes, edescr, bdescr, gradbdescr, ExB, gradBdrift, ggcorr, Rmajor):
+def extractValues(positions,gbpositions, drifts, efield, gradb, drgradb, bcells,bnodes, edescr, bdescr, gradbdescr, ExB, gradBdrift, ggcorr,potential, pdescr, Rmajor):
     pos = []
     gbpos = []
     enum = []
@@ -23,9 +23,11 @@ def extractValues(positions,gbpositions, drifts, efield, gradb, drgradb, bcells,
     dea = []
     dga = []
     ga = []
+    pa = []
     bound = []
     boundnodes = []
     ggcorrnum = []
+    pot = []
     print(len(drifts))
     print(len(efield))
     print(len(ggcorr))
@@ -37,10 +39,11 @@ def extractValues(positions,gbpositions, drifts, efield, gradb, drgradb, bcells,
         dbn = [float(x) for x in drgradb[i].split()]
         bnd = float(bcells[i])
         gg = float(ggcorr[i])
+        p = float(potential[i])
 
         # filter our boundary cells => These have zero field and drift and are not useful for interpretation!
 	# quick fix with nan
-        if  bnd != 1 and not math.isnan(en[0]) and not math.isnan(den[0]) and (abs(max(en))) <300 and abs(min(en))<300: 
+        if bnd != 1 and not math.isnan(en[0]) and not math.isnan(den[0]):  # |E|<300 outlier filter disabled
             denum.append(den)
             pos.append(dpn)
             enum.append(en)
@@ -50,10 +53,13 @@ def extractValues(positions,gbpositions, drifts, efield, gradb, drgradb, bcells,
             bfa = bdescr(dpn[0], dpn[2], dpn[1],Rmajor, Constants.Rminor)
             gfa = gradbdescr(dpn[0], dpn[2], dpn[1], Rmajor, Constants.Rminor)
             dgfa = gradBdrift(bfa,gfa)
+            pan = pdescr(dpn[0],dpn[2],dpn[1],Rmajor, Constants.Rminor)
+            pa.append(pan)
             dga.append(dgfa.tolist())
             ea.append(efa)
             ba.append(bfa)
             ggcorrnum.append(gg)
+            pot.append(p)
             dea.append(ExB(efa, bfa).tolist())
             denumperp.append(transformRadPerptoCyl(denum[-1],pos[-1], Rmajor))
             enumperp.append(transformRadPerptoCyl(enum[-1],pos[-1], Rmajor))
@@ -69,7 +75,7 @@ def extractValues(positions,gbpositions, drifts, efield, gradb, drgradb, bcells,
             ga.append(gfa)
             gbpos.append(dpn)
             gnumperp.append(transformRadPerptoCyl(gnum[-1],gbpos[-1],Rmajor))
-    return [pos,gbpos, enum, denum,gnum,dgnum, enumperp,denumperp, gnumperp ,dgnumperp,ea , ba, dea, ga,dga, ggcorrnum]
+    return [pos,gbpos, enum, denum,gnum,dgnum, enumperp,denumperp, gnumperp ,dgnumperp,ea , ba, dea, ga,dga, ggcorrnum, pot, pa]
 
 def extractVectorInfo(numeric,analytical):
     res_mag = []
@@ -83,7 +89,7 @@ def extractVectorInfo(numeric,analytical):
         an_mag.append(math.sqrt(sum([x**2 for x in analytical[i]])))
         er_vec.append(Utilities.listdif(numeric[i],analytical[i]))
         er_mag.append(math.sqrt(sum([x**2 for x in er_vec[i]])))
-        er_rel_vec.append([math.sqrt(x**2)/an_mag[i] for x in er_vec[i]])
+        er_rel_vec.append([abs(x)/max(an_mag[i], 1e-9) for x in er_vec[i]])  # guard against division by zero
         if an_mag[i] == 0:
             print('ZERO!')
         er_rel_mag.append(sum(er_rel_vec[i]))
@@ -100,7 +106,7 @@ def slice(positions,info,direction, coordinate):
     index1 = direction-1 %3
     index2 = direction+1 %3
     for i in range(len(positions)):
-        if positions[i][direction] == coordinate:
+        if positions[i][direction] in coordinate:
             pos_slice.append([positions[i][index1],positions[i][index2]])
             info_slice.append(info[i])
 
@@ -115,9 +121,9 @@ def getAllSlices(positions,allInfo,direction,coordinate):
     result.append(pos_slice)
     return result
 
-def getAllInfo(positions,gbpositions,drifts,efield, gradb, drgradb,bcells,bnodes,edescr,bdescr, gradbdescr, ExB, gradBdrift,GGcorr,Rmajor, name):
-    [pos, gbpos, enum, denum,gnum,dgnum, enumperp,denumperp, gnumperp ,dgnumperp,ea , ba, dea, ga,dga, GGcorr] = extractValues(positions, gbpositions,
-                        drifts, efield, gradb, drgradb, bcells,bnodes, edescr, bdescr,gradbdescr, ExB, gradBdrift, GGcorr, Rmajor)
+def getAllInfo(positions,gbpositions,drifts,efield, gradb, drgradb,bcells,bnodes,edescr,bdescr, gradbdescr, ExB, gradBdrift,GGcorr,pot,pdescr,Rmajor, name):
+    [pos, gbpos, enum, denum,gnum,dgnum, enumperp,denumperp, gnumperp ,dgnumperp,ea , ba, dea, ga,dga, GGcorr, potential, potan] = extractValues(positions, gbpositions,
+                        drifts, efield, gradb, drgradb, bcells,bnodes, edescr, bdescr,gradbdescr, ExB, gradBdrift, GGcorr, pot,pdescr,Rmajor)
 
     [deres_mag, dean_mag, deer_mag, deer_vec, deer_rel_mag, deer_rel_vec] =  extractVectorInfo(denumperp,dea)
     [eres_mag, ean_mag, eer_mag, eer_vec, eer_rel_mag, eer_rel_vec] = extractVectorInfo(enumperp, ea)
@@ -125,16 +131,17 @@ def getAllInfo(positions,gbpositions,drifts,efield, gradb, drgradb,bcells,bnodes
     [dgres_mag, dgan_mag, dger_mag, dger_vec, dger_rel_mag, dger_rel_vec] = extractVectorInfo(dgnumperp, dga)
 #    print('an',dgan_mag)
  #   print('res', dgres_mag)
-
+    
+    potdif = Utilities.listdif(potential,potan)
     InfoDict = {}
     InfoDictGB = {}
     keys = ['edrifts', 'efield','edrifts_perp','efield_perp' ,'ef_mag', 'ef_an_mag','ef_an', 'edrift_mag', 'edrift_an_mag', 'ef_vec_er', 'ef_mag_er',
             'edrift_vec_er', 'edrift_er_mag','efield_er_rel_mag', 'efield_er_rel_vec', 'edrift_er_rel_mag', 'edrift_er_rel_vec',
             'gdrifts', 'gdrifts_perp', 'gdrift_mag','gdrift_an_mag',
-            'gdrift_vec_er','gdrift_er_mag','gdrift_er_rel_mag', 'gb_er_rel_vec', 'GGcorr','pos']
+            'gdrift_vec_er','gdrift_er_mag','gdrift_er_rel_mag', 'gb_er_rel_vec', 'GGcorr','potential','potan','potdif','pos']
     keysgb = ['gradb','gradb_perp', 'gb_mag','gb_an_mag','gb_vec_er', 'gb_mag_er','gb_er_rel_mag','gb_er_rel_vec','gb_pos']
     values = [denum,enum,denumperp,enumperp,eres_mag,ean_mag,ea,deres_mag,dean_mag,eer_vec,eer_mag,deer_vec,deer_mag,eer_rel_mag,eer_rel_vec,deer_rel_mag,deer_rel_vec,
-              dgnum,dgnumperp,dgres_mag,dgan_mag,dger_vec,dger_mag,dger_rel_mag,dger_rel_vec,GGcorr,pos]
+              dgnum,dgnumperp,dgres_mag,dgan_mag,dger_vec,dger_mag,dger_rel_mag,dger_rel_vec,GGcorr,potential,potan,potdif,pos]
     valuesgb = [gnum,gnumperp,gres_mag,gan_mag,ger_vec,ger_mag,ger_rel_mag,ger_rel_vec,gbpos]
     for i in range(len(values)):
         InfoDict[keys[i]] = values[i]
@@ -165,7 +172,7 @@ def transformCyltoRadPerp(v,pos, Rmajor):
     result.append(v[2]*c-v[0]*s)
     return result
     
-def readAllInfo(method,descr,noise, folder):
+def readAllInfo(method,descr,noise,folder):
     result = {}
     print(folder+'AllInfo'+method+'Descr'+descr+'Noise'+noise+'.csv')
     df = pd.read_csv(folder+'AllInfo'+method+'Descr'+descr+'Noise'+noise+'.csv')
diff --git a/Scripts_Matthieu/AnalyticalPrescriptions.py b/Scripts_Matthieu/AnalyticalPrescriptions.py
index da6f92c3d398e5b46ee43caf7831e9f83c2d8355..eee3c13c42410e1747ac0d0f4960e95038f1410f 100644
--- a/Scripts_Matthieu/AnalyticalPrescriptions.py
+++ b/Scripts_Matthieu/AnalyticalPrescriptions.py
@@ -58,6 +58,13 @@ def potential_description2(r,z,theta,Rmajor,Rminor):
     z = z/100
     potval = 100*(math.cos(2*math.atan2(z,r)) + math.sqrt(r**2+z**2))
     return potval
+def potential_description7(r,z,theta,Rmajor,Rminor):
+    r = (r-Rmajor)/100
+    z = z/100
+    potval = 100*(math.cos(2*math.atan2(z,r)))
+    return potval
+    
+
 # Electric Fields
 def e_descr1(r,z,theta, Rmajor):
     r = r/100
@@ -101,4 +108,12 @@ def e_descr6(r,z,theta, Rmajor):
     Ephi = 0
     Ez = -500*math.exp(-D)*1/D*z
     return [Er,Ephi, Ez]
+    
+def e_descr7(r,z,theta, Rmajor):
+    r = (r-Rmajor)/100
+    z = z/100
+    Er = 200*math.sin(2*math.atan2(z,r))*z/(z**2+r**2) 
+    Ephi = 0
+    Ez = 200*math.sin(2*math.atan2(z,r))*-r/(z**2+r**2) 
+    return [Er,Ephi,Ez]
 
diff --git a/Scripts_Matthieu/CorPlots.py b/Scripts_Matthieu/CorPlots.py
new file mode 100644
index 0000000000000000000000000000000000000000..1f3ccb10dd4942cf34bdca7bd7119da3946670b9
--- /dev/null
+++ b/Scripts_Matthieu/CorPlots.py
@@ -0,0 +1,165 @@
+import numpy as np
+import os
+import matplotlib.pyplot as plt
+import driftFunctions
+import AnalyticalPrescriptions
+import Constants
+import Utilities
+import pandas as pd
+import PlottingFunctions
+import AnalysisFunctions
+import sys
+from scipy import sparse
+import scipy.optimize as optimize
+import math
+import matplotlib
+import statistics
+from scipy import stats
+path = os.getcwd()
+sys.path.append('../../emc3-grid-generator')
+from gg import GG
+
+xnames = ['_1','_2','_3','_4','_5','_6','_7','_8','_9']#,'_10']
+Tdata = []
+
+def monexp(x,L):
+    return np.exp(-x/L)
+
+
+for i in range(len(xnames)):
+    file = open('/u/mbj/Drifts/data-for-drift-computations/Reference-FullRes2/X-TEST-HR/x-out'+xnames[i]+'/x-out/TE_TI','r')
+    Temptemp = [r.split() for r in file.readlines()]
+    file.close()
+    Temp = []
+    for row in Temptemp:  # do not shadow the outer loop variable i
+        Temp.extend(row)
+    Tempel = Temp[0:len(Temp)//2]  # first half of TE_TI holds the electron temperatures
+    Tempar = np.array(Tempel, dtype=float)
+    Tdata.append(Tempar)
+
+
+avTemp = Tdata[0]    
+for i in range(1,len(xnames)):
+    print(Tdata[i][50000:50010])
+    avTemp = np.add(avTemp,Tdata[i])
+avTemp = avTemp*1/len(xnames)
+
+difData = []  
+normdifData = []  
+variance = []
+for i in range(len(xnames)):
+    difTemp = np.subtract(avTemp,Tdata[i])
+#    normdifTemp = np.divide(difTemp,avTemp)
+    var = np.square(difTemp)
+    variance.append(var)
+    difData.append(difTemp)
+#    normdifData.append(normdifTemp) 
+    
+avDif = difData[0]    
+avVar = variance[0]
+for i in range(1,len(xnames)):
+    avDif = np.add(avDif,difData[i])
+    avVar = np.add(avVar,variance[i])
+avDif = avDif*1/len(xnames)
+avVar = avVar*1/len(xnames)
+posdata = AnalysisFunctions.readText(path+'/Output1Descr6Noise0/DRIFT_POS')
+# Load position data
+geoData = AnalysisFunctions.readText('/u/mbj/Drifts/data-for-drift-computations/Reference-FullRes2/Output5Descr6Noise0/NBS')
+volumes =np.array(AnalysisFunctions.readText('/u/mbj/Drifts/data-for-drift-computations/Reference-FullRes2/Output3Descr3Noise0/VOLUMES'),dtype=float)
+plot=plt.figure(1)
+########### Get covariance data => Try to fit correlation length model, looking at individual surfaces ##############################
+# Get single error
+limits = AnalysisFunctions.readText(path+'/Output1Descr6Noise0/LIMITS')
+dim = math.floor(len(geoData)/5)
+#TSS = [[15],[4],[15],[4],[15],[4],[15],[4],[15],[4],[15],[1]]
+RSS=[[54],[66],[78],[95],[85],[92],[100],[113],[88],[72]]
+PSS = [[100],[50],[150],[200],[360],[180],[210],[260],[315],[450]]
+RSS=[[72]]
+PSS=[[430]]
+colors=["b","g","r","c","m","y","k","purple","grey","orange"]
+choice=2
+rs=[]
+zs=[]
+for k in range(len(RSS)):
+    TS=[12]
+    PS=PSS[k]
+    RS=RSS[k]
+    positions = []
+    difvalues=[]
+    variances =[]
+    indices=[]
+    vols=[]
+    for i in range(dim):
+        coords =[float(x) for x in geoData[5*i+2].split()]
+        limT = [float(x) for x in limits[4*i+1].split()]
+        limP = [float(x) for x in limits[4*i+2].split()]
+        limR = [float(x) for x in limits[4*i+3].split()]
+        if choice==1:
+            if limT[0] in TS and limR[0] in RS:
+               positions.append(coords)
+               difvalues.append([r[i] for r in difData])
+               indices.append(limP[0])
+        elif choice == 2:
+           if limT[0] in TS and limP[0] in PS:
+               positions.append(coords)
+               difvalues.append([r[i] for r in difData])
+               indices.append(limR[0])
+        else:
+           if limR[0] in RS and limP[0] in PS:
+               positions.append(coords)
+               difvalues.append([r[i] for r in difData])    
+               indices.append(limT[0])
+    
+    posArray = np.array(positions)
+    difArray = np.array(difvalues)
+    print(posArray[0])
+    
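+    # Rows of difArray are cells, columns are runs; corrcoef row 0 gives each
+    # cell's correlation with the first selected cell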
+    covmat = np.cov(difArray)
+    cormat = np.corrcoef(difArray)
+    cor1D = cormat[0]
+    cov1D = covmat[0]
+
+    distances = []
+    poldistances = []
+    polangles = []
+    #indices=np.linspace(0,len(cor1D),len(cor1D))
+#    for i in range(len(cor1D)):
+#        dis = min(math.sqrt((positions[i][0]-positions[0][0])**2+(positions[i][2]-positions[0][2])**2+((positions[i][0]+positions[0][0])/2*abs(positions[i][1]-positions[0][1]))**2), \
+#        math.sqrt((positions[i][0]-positions[35][0])**2+(positions[i][2]-positions[35][2])**2+((positions[i][0]+positions[35][0])/2*abs(positions[i][1]-positions[35][1]))**2))
+#    poldis = abs(math.atan2(positions[i][2],positions[i][0]-580)-math.atan2(positions[0][2],positions[0][0]-580))*(math.sqrt((positions[i][0]-580)**2+(positions[i][2])**2)+math.sqrt((positions[0][0]-580)**2+(positions[0][2])**2))/2
+#    polangle = math.atan2(positions[i][2],positions[i][0]-580)
+#        distances.append(dis)
+
+
+
+
+    xl = indices
+    # Label follows the index collected above: poloidal for choice 1, radial for choice 2
+    xlab = {1: 'Poloidal Index', 2: 'Radial Index'}.get(choice, 'Toroidal Index')
+    plot = plt.figure(1)
+    sc1 = plt.scatter(xl,cor1D,color=colors[k])
+    plt.xlabel(xlab)
+    plt.ylabel('Correlation Coefficient')
+    plt.title('Correlation with Starting Cell')
+    r = positions[0][0]
+    z = positions[0][2]
+    rs.append(r)
+    zs.append(z)
+    
+
+
+
+print(rs,zs,'rz')
+plot=plt.figure(2)
+plt.title('Spatial Distribution of Considered Starting Points')
+
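+# Draw the poloidal grid cross-section with the emc3-grid-generator (GG) helper and overlay the starting points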
+grid = GG()
+dat = grid.read_mesh3d(fn='Inputs/GEOMETRY_3D_DATA')
+grid.plot_mesh3d(dat,TS[0])
+plt.xlabel('Horizontal Position')
+plt.ylabel('Vertical Position')
+print('done grid, now points please')
+for i in range(len(PSS)):
+    print('r',rs[i],'z',zs[i])
+    plt.scatter(rs[i:i+1],zs[i:i+1],c=colors[i],s=50)
+    #plt.plot(rs[i],zs[i],"r")#colors[i])
+plt.plot(rs[0],zs[0],"r")
+plt.show()
diff --git a/Scripts_Matthieu/CorrelationLength.py b/Scripts_Matthieu/CorrelationLength.py
new file mode 100644
index 0000000000000000000000000000000000000000..33ef24233a427f8af2ea8bfa544f6ea801b7a802
--- /dev/null
+++ b/Scripts_Matthieu/CorrelationLength.py
@@ -0,0 +1,142 @@
+import numpy as np
+import os
+import matplotlib.pyplot as plt
+import driftFunctions
+import AnalyticalPrescriptions
+import Constants
+import Utilities
+import pandas as pd
+import PlottingFunctions
+import AnalysisFunctions
+import sys
+from scipy import sparse
+import scipy.optimize as optimize
+import math
+import matplotlib
+import statistics
+from scipy import stats
+path = os.getcwd()
+
+
+xnames = ['_1','_2','_3','_4','_5','_6','_7','_8']#,'_9']#,'_10']
+Tdata = []
+
+def monexp(x,L):
+    return np.exp(-x/L)
+
+
+for i in range(len(xnames)):
+    # First half of the flattened TE_TI file is the electron temperature
+    with open('/u/mbj/Drifts/data-for-drift-computations/Reference-FullRes2/X-TEST-HR/x-out'+xnames[i]+'/x-out/TE_TI','r') as file:
+        rows = [r.split() for r in file.readlines()]
+    Temp = []
+    for row in rows:
+        Temp.extend(row)
+    Tempel = Temp[0:len(Temp)//2]
+    Tempar = np.array(Tempel, dtype=float)
+    Tdata.append(Tempar)
+
+
+avTemp = Tdata[0]    
+for i in range(1,len(xnames)):
+    print(Tdata[i][50000:50010])
+    avTemp = np.add(avTemp,Tdata[i])
+avTemp = avTemp*1/len(xnames)
+
+difData = []  
+
+for i in range(len(xnames)):
+    difTemp = np.subtract(avTemp,Tdata[i])
+    difData.append(difTemp)
+    
+avDif = difData[0]    
+
+for i in range(1,len(xnames)):
+    avDif = np.add(avDif,difData[i])
+
+avDif = avDif*1/len(xnames)
+# Load position data
+posdata = AnalysisFunctions.readText(path+'/Reference-FullRes2/Output1Descr6Noise0/DRIFT_POS')
+geoData = AnalysisFunctions.readText('/u/mbj/Drifts/data-for-drift-computations/Reference-FullRes2/Output5Descr6Noise0/NBS')
+limits = AnalysisFunctions.readText(path+'/Reference-FullRes2/Output1Descr6Noise0/LIMITS')
+dim = math.floor(len(geoData)/5)
+
+
+
+# Index window scanned for correlation lengths
+polstart = 150
+polend = 200
+tend = 15
+tstart = 10
+npol = polend-polstart
+nt = tend-tstart
+pvalues=np.empty((npol,nt))
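+# For each (poloidal, toroidal) index pair: gather the cells along the radial
+# direction, correlate their run-to-run deviations with the first cell, and fit
+# monexp to correlation vs distance to extract a correlation length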
+for k in range(npol):
+    for j in range(nt):
+        positions = []
+        difvalues=[]
+
+        for i in range(dim):
+            coords =[float(x) for x in geoData[5*i+2].split()]
+            limT = [float(x) for x in limits[4*i+1].split()]
+            limP = [float(x) for x in limits[4*i+2].split()]
+
+            if limT[0] == j+tstart and limP[0] == k+polstart:
+                positions.append(coords)
+                difvalues.append([r[i] for r in difData])
+    
+        if len(positions) >0:
+            posArray = np.array(positions)
+            difArray = np.array(difvalues)
+            covmat = np.cov(difArray)
+            cormat = np.corrcoef(difArray)
+            cor1D = cormat[0]
+            cov1D = covmat[0]
+            distances = []
+
+            for i in range(len(cor1D)):
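+                # Distance to the first cell: Delta R, Delta Z plus the toroidal arc length ((R_i+R_0)/2 * |dphi|)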
+                dis = math.sqrt((positions[i][0]-positions[0][0])**2+(positions[i][2]-positions[0][2])**2+((positions[i][0]+positions[0][0])/2*abs(positions[i][1]-positions[0][1]))**2)
+                distances.append(dis)
+
+
+            xl = distances
+
+            # Drop the last sample before fitting the exponential decay
+            corlen = -1
+            x = np.array(xl[0:corlen])
+            y = np.array(cor1D[0:corlen])
+
+#        cv,p=stats.pearsonr(x,y)
+            popt, pcov = optimize.curve_fit(monexp, x, y)
+            print('popt', popt, 'pcov', pcov)
+            corlengths[k,j] = popt[0]
+            pvalues[k,j] = pcov[0,0]
+        else:
+            corlengths[k,j] = 0
+            pvalues[k,j] = 2
+
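+# Flatten the (poloidal x toroidal) maps for the scatter and histogram plots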
+corlen1D = np.reshape(corlengths,(nt*npol,-1))
+pval1D = np.reshape(pvalues,(nt*npol,-1))
+
+x=[]
+y=[]
+for i in range(len(corlen1D)):
+    if pval1D[i][0] <2:
+        x.append(corlen1D[i][0])
+        y.append(pval1D[i][0])
+print(x)
+print(y)	
+corlen1D = np.array(x)
+pval1D = np.array(y)
+print(corlen1D)
+print(pval1D)	
+plot = plt.figure(1)
+sc = plt.scatter(corlen1D,pval1D)
+plt.ylim((0,1))
+plt.savefig(path+'/CorrelationLengthsRadial.png')
+
+plot2 = plt.figure(2)
+# Creating plot
+plt.hist2d(np.array(corlen1D),np.array(pval1D), bins=50,norm=matplotlib.colors.LogNorm())
+plt.colorbar()
+plt.savefig("RadialCorHistogram")
+plt.show()
+
diff --git a/Scripts_Matthieu/ErrorAnalysis.py b/Scripts_Matthieu/ErrorAnalysis.py
index 4c2ca94ba804820977d8937ca605918eec13c0f0..54f238027782dfd56767d0953e797f9a8ae12e50 100644
--- a/Scripts_Matthieu/ErrorAnalysis.py
+++ b/Scripts_Matthieu/ErrorAnalysis.py
@@ -10,6 +10,8 @@ import AnalysisFunctions
 import os
 import sys
 import numpy as np
+import matplotlib
+plt.rcParams.update({'font.size': 12})
 
 # Analyze the performance and errors of different methods
 # This script does this for given analytical prescriptions for potential and
@@ -17,12 +19,15 @@ import numpy as np
 # Specify which analytical case is considered and which cases should be 
 # calculated
 analyticalCase = sys.argv[1]
-Cases = sys.argv[2:]
+n = int(sys.argv[2])
+Study = sys.argv[3]
+Cases = sys.argv[4:]
+noise = '0'
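+# n normalises the raw cell count (y/n below); Study labels the resolution scan on the plot axes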
 path = os.getcwd()
 print(path)
 print(Cases)
-methodnames = ['GG Cell-centered', 'LS Cell-centered', 'GG Nodal', 'LS Nodal','Mapping'] 
-methods = ['1','2','3','4','5']
+methodnames = ['GG Cell-centered', 'LS Cell-centered', 'GG Nodal', 'LS Nodal','Mapping', 'GG exact surfaces'] 
+methods = ['1','2','3','4','5', '6']
 # Define structures to be computed    
 efield_error_key = 'efield_er_rel_mag'
 L2Errors = []
@@ -57,22 +62,22 @@ for method in methods:
         distmax = 0
         distnontormax = 0
         print(case)
-        [caseInfo, caseInfoGB] = AnalysisFunctions.readAllInfo(method, analyticalCase, '0', case)
+        [caseInfo, caseInfoGB] = AnalysisFunctions.readAllInfo(method, analyticalCase, noise, case)
         caseErrors = caseInfo.get(efield_error_key)
         ggErrors = caseInfo.get('GGcorr')
         positions = caseInfo.get('pos')
 #	errorsDF[case] = caseErrors
         y = len(caseErrors)
-        errorsL2.append(math.sqrt(sum([x**2 for x in caseErrors]))/y)
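+        # L2 error computed as an RMS: sqrt(sum(x_i**2)/N)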
+        errorsL2.append(math.sqrt(sum([x**2/y for x in caseErrors])))
         errorsMean.append(sum([x for x in caseErrors])/y)
-        nbCells.append(y)
-        vol = AnalysisFunctions.readText(path+'/'+case+'Output'+method+'Descr'+analyticalCase+'Noise0/VOLAV')
+        nbCells.append(y/n)
+        vol = AnalysisFunctions.readText(path+'/'+case+'Output'+method+'Descr'+analyticalCase+'Noise'+noise+'/VOLAV')
         vol = float(vol[0])
 #        print(vol)
         charLength.append((vol/10**6)**(1/3))
         maxErrors.append(max(caseErrors))
 # Read geometrical data
-        geoData = AnalysisFunctions.readText(path+'/'+case+'Output'+method+'Descr'+analyticalCase+'Noise0/NBS')	
+        geoData = AnalysisFunctions.readText(path+'/'+case+'Output'+method+'Descr'+analyticalCase+'Noise'+noise+'/NBS')	
 # Get single error
         for i in range(len(caseErrors)):
            ipos = positions[i]
@@ -136,7 +141,7 @@ typsize = 50*36*150
 #avGL =  
  
 
-upperX = 1e-1
+upperX = 5*1e-1
 lowerX = 5*1e-3
 upperY = 1e-1
 lowerY = 1e-6   
@@ -157,21 +162,23 @@ sc2 = plt.scatter(xarray,MeanErrors[1],label = methodnames[1])
 sc3 = plt.scatter(xarray,MeanErrors[2],label = methodnames[2]) 
 sc4 = plt.scatter(xarray,MeanErrors[3],label = methodnames[3]) 
 sc5 = plt.scatter(xarray,MeanErrors[4],label = methodnames[4]) 
+#sc6 = plt.scatter(xarray,MeanErrors[5],label = methodnames[5])
 ax = plt.gca()
 ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
-ax.legend()
+
 x = np.linspace(0.000001,1,100)
 y = x**(1)/10
 y2 = x**2/10
 plt.loglog(x,y,label = 'first order')
 plt.loglog(x,y2, label = 'second order')
+ax.legend()
 plt.axis(limits)
 plt.title('Mean Errors Description '+analyticalCase)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'MeanError'+xlabel+'.png')
+plt.savefig(path+'/Descr'+analyticalCase+'MeanError'+xlabel+'.pdf')
 
 plot2 = plt.figure(2)
 sc1 = plt.scatter(xarray,L2Errors[0],label = methodnames[0])
@@ -195,7 +202,7 @@ plt.axis(limits)
 plt.title('L2 Errors Description'+analyticalCase)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'L2Error'+xlabel+'.png')
+plt.savefig(path+'/Descr'+analyticalCase+'L2Error'+xlabel+'.pdf')
 
 
 plot3 = plt.figure(3)
@@ -204,23 +211,25 @@ sc2 = plt.scatter(xarray,MaxErrors[1],label = 'Max Error '+methodnames[1])
 sc3 = plt.scatter(xarray,MaxErrors[2],label = 'Max Error '+methodnames[2])
 sc4 = plt.scatter(xarray,MaxErrors[3],label = 'Max Error '+methodnames[3])
 sc5 = plt.scatter(xarray,MaxErrors[4],label = 'Max Error '+methodnames[4])
+#sc5 = plt.scatter(xarray,MaxErrors[5],label = 'Max Error '+methodnames[5])
 
 v2 = plt.vlines(typsize,lowerY,upperY, color = 'g')
 ax = plt.gca()
 ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
-ax.legend()
+
 x = np.linspace(0.000001,1,100)
 y = x**(1)/10
 y2 = x**2/10
 plt.loglog(x,y,label = 'first order')
 plt.loglog(x,y2, label = 'second order')
+ax.legend()
 plt.axis(limits)
 plt.title('Max Errors Description'+analyticalCase)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'MaxError'+xlabel+'.png')
+plt.savefig(path+'/Descr'+analyticalCase+'MaxError'+xlabel+'.pdf')
 
 plot4 = plt.figure(4)
 sc1 = plt.scatter(xarray,GGErrors[0],label = methodnames[0])
@@ -234,17 +243,18 @@ ax = plt.gca()
 ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
-ax.legend()
+
 x = np.linspace(0.000001,1,100)
 y = x**(1)/10
 y2 = x**2/10
 or1 = plt.loglog(x,y,label = 'first order')
 or2 = plt.loglog(x,y2, label = 'second order')
+ax.legend()
 plt.axis(limits)
-plt.title('GG Correction Error'+analyticalCase)
+plt.title('GG Correction Error Description '+analyticalCase)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'GG correction Error'+xlabel+'.png')
+plt.savefig(path+'/Descr'+analyticalCase+'GG correction Error'+xlabel+'.pdf')
 
 
 xlabel = 'Max Cell Length [m]'
@@ -267,17 +277,18 @@ ax = plt.gca()
 ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
-ax.legend()
+
 x = np.linspace(0.000001,1,100)
 y = x**(1)/400
 y2 = x**2/400
 plt.loglog(x,y,label = 'first order')
 plt.loglog(x,y2, label = 'second order')
+ax.legend()
 plt.axis(limits)
 plt.title('Mean Errors Description'+analyticalCase)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'MeanError'+xlabel+'.png')
+plt.savefig(path+'/Descr'+analyticalCase+'MeanError'+xlabel+'.pdf')
 
 plot6 = plt.figure(6)
 sc1 = plt.scatter(xarray,L2Errors[0],label = methodnames[0])
@@ -291,17 +302,18 @@ ax = plt.gca()
 ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
-ax.legend()
+
 x = np.linspace(0.000001,1,100)
 y = x**(1)/100
 y2 = x**2/100
 plt.loglog(x,y,label = 'first order')
 plt.loglog(x,y2, label = 'second order')
+ax.legend()
 plt.axis(limits)
 plt.title('L2 Errors Description'+analyticalCase)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'L2Error'+xlabel+'.png')
+plt.savefig(path+'/Descr'+analyticalCase+'L2Error'+xlabel+'.pdf')
 
 
 plot7 = plt.figure(7)
@@ -316,17 +328,18 @@ ax = plt.gca()
 ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
-ax.legend()
+
 x = np.linspace(0.000001,1,100)
 y = x**(1)/200
 y2 = x**2/200
 plt.loglog(x,y,label = 'first order')
 plt.loglog(x,y2, label = 'second order')
 plt.axis(limits)
+ax.legend()
 plt.title('Max Errors Description'+analyticalCase)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'MaxError'+xlabel+'.png')
+plt.savefig(path+'/Descr'+analyticalCase+'MaxError'+xlabel+'.pdf')
 
 plot8 = plt.figure(8)
 sc1 = plt.scatter(xarray,GGErrors[0],label = methodnames[0])
@@ -340,17 +353,18 @@ ax = plt.gca()
 ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
-ax.legend()
+
 x = np.linspace(0.000001,1,100)
 y = x**(1)/100
 y2 = x**2/100
 plt.loglog(x,y,label = 'first order')
 plt.loglog(x,y2, label = 'second order')
 plt.axis(limits)
-plt.title('GG Correction Error'+analyticalCase+xlabel)
+ax.legend()
+plt.title('GG Correction Error Description '+analyticalCase+' '+xlabel)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'GG correction Error'+xlabel+'.png')
+plt.savefig(path+'/Descr'+analyticalCase+'GG correction Error'+xlabel+'.pdf')
 
 ############################################################################
 # Max length, non toroidal
@@ -358,7 +372,7 @@ xlabel = 'Max Length (excluding toroidal direction) [m]'
 ylabel = 'Relative Magnitude Error'
 xarray = distancesnontor
 
-upperX = 1e-1
+upperX = 5*1e-1
 lowerX = 5*1e-3 
 
 plot9 = plt.figure(9)
@@ -372,17 +386,18 @@ ax = plt.gca()
 ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
-ax.legend()
+
 x = np.linspace(0.000001,1,100)
 y = x**(1)/10
 y2 = x**2/10
 plt.loglog(x,y,label = 'first order')
 plt.loglog(x,y2, label = 'second order')
 plt.axis(limits)
+ax.legend()
 plt.title('Mean Errors Description'+analyticalCase)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'MeanError'+xlabel+'.png')
+plt.savefig(path+'/Descr'+analyticalCase+'MeanError'+xlabel+'.pdf')
 
 plot10 = plt.figure(10)
 sc1 = plt.scatter(xarray,L2Errors[0],label = methodnames[0])
@@ -396,17 +411,18 @@ ax = plt.gca()
 ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
-ax.legend()
+
 x = np.linspace(0.000001,1,100)
 y = x**(1)/100
 y2 = x**2/100
 plt.loglog(x,y,label = 'first order')
 plt.loglog(x,y2, label = 'second order')
 plt.axis(limits)
+ax.legend()
 plt.title('L2 Errors Description'+analyticalCase)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'L2Error'+xlabel+'.png')
+plt.savefig(path+'/Descr'+analyticalCase+'L2Error'+xlabel+'.pdf')
 
 
 plot11 = plt.figure(11)
@@ -421,17 +437,18 @@ ax = plt.gca()
 ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
-ax.legend()
+
 x = np.linspace(0.000001,1,100)
 y = x**(1)/10
 y2 = x**2/10
 plt.loglog(x,y,label = 'first order')
 plt.loglog(x,y2, label = 'second order')
 plt.axis(limits)
+ax.legend()
 plt.title('Max Errors Description'+analyticalCase)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'MaxError'+xlabel+'.png')
+plt.savefig(path+'/Descr'+analyticalCase+'MaxError'+xlabel+'.pdf')
 
 plot12 = plt.figure(12)
 sc1 = plt.scatter(xarray,GGErrors[0],label = methodnames[0])
@@ -445,25 +462,26 @@ ax = plt.gca()
 ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
-ax.legend()
+
 x = np.linspace(0.000001,1,100)
 y = x**(1)/10
 y2 = x**2/10
 plt.loglog(x,y,label = 'first order')
 plt.loglog(x,y2, label = 'second order')
+ax.legend()
 plt.axis(limits)
-plt.title('GG Correction Error'+analyticalCase)
+plt.title('GG Correction Error Description '+analyticalCase)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'GG correction Error'+xlabel+'.png')
+plt.savefig(path+'/Descr'+analyticalCase+'GG correction Error'+xlabel+'.pdf')
 
 ###########################################################################"
 # Nb of cells
-xlabel = 'Number of cells'
+xlabel = Study + ' Resolution (Nb Cells) [-]'
 ylabel = 'Relative Magnitude Error'
 xarray = nbs[0]
-upperX = 1e7
-lowerX = 1000
+upperX = 1e2
+lowerX = 1
 upperY = 1e-1
 lowerY = 1e-6 
 
@@ -475,21 +493,22 @@ sc2 = plt.scatter(xarray,MeanErrors[1],label = methodnames[1])
 sc3 = plt.scatter(xarray,MeanErrors[2],label = methodnames[2]) 
 sc4 = plt.scatter(xarray,MeanErrors[3],label = methodnames[3]) 
 sc5 = plt.scatter(xarray,MeanErrors[4],label = methodnames[4]) 
+sc6 = plt.scatter(xarray,MeanErrors[5],label = methodnames[5]) 
 ax = plt.gca()
 ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
+x = np.linspace(1,1000,1000)
+y = 1/x*1/100
+y2 = 1/x**2
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
 ax.legend()
-x = np.linspace(0.000001,1,100)
-y = x**(1)/100
-y2 = x**2/100
-plt.loglog(x,y)
-plt.loglog(x,y2)
 plt.axis(limits)
-plt.title('Mean Errors Description'+analyticalCase+xlabel)
+plt.title('Mean Errors Description '+analyticalCase)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'MeanError'+xlabel+'.png')
+plt.savefig(path+'/Descr'+analyticalCase+'MeanError'+xlabel+'.pdf')
 
 plot14 = plt.figure(14)
 sc1 = plt.scatter(xarray,L2Errors[0],label = methodnames[0])
@@ -497,23 +516,24 @@ sc2 = plt.scatter(xarray,L2Errors[1],label = methodnames[1])
 sc3 = plt.scatter(xarray,L2Errors[2],label = methodnames[2])
 sc4 = plt.scatter(xarray,L2Errors[3],label = methodnames[3])
 sc5 = plt.scatter(xarray,L2Errors[4],label = methodnames[4])
-
+sc = plt.scatter(xarray,L2Errors[5],label = methodnames[5])
 v2 = plt.vlines(typsize,lowerY,upperY, color = 'g')
 ax = plt.gca()
 ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
 ax.legend()
-x = np.linspace(0.000001,1,100)
-y = x**(1)/100
-y2 = x**2/100
+x = np.linspace(1,1000,1000)
+y = 1/x*1/100
+y2 = 1/x**2
 plt.loglog(x,y,label = 'first order')
 plt.loglog(x,y2, label = 'second order')
+ax.legend()
 plt.axis(limits)
 plt.title('L2 Errors Description'+analyticalCase)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'L2Error'+xlabel+'.png')
+plt.savefig(path+'/Descr'+analyticalCase+'L2Error'+xlabel+'.pdf')
 
 
 plot15 = plt.figure(15)
@@ -522,23 +542,23 @@ sc2 = plt.scatter(xarray,MaxErrors[1],label = 'Max Error '+methodnames[1])
 sc3 = plt.scatter(xarray,MaxErrors[2],label = 'Max Error '+methodnames[2])
 sc4 = plt.scatter(xarray,MaxErrors[3],label = 'Max Error '+methodnames[3])
 sc5 = plt.scatter(xarray,MaxErrors[4],label = 'Max Error '+methodnames[4])
-
+sc = plt.scatter(xarray,MaxErrors[5],label = 'Max Error '+methodnames[5])
 v2 = plt.vlines(typsize,lowerY,upperY, color = 'g')
 ax = plt.gca()
 ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
-ax.legend()
-x = np.linspace(0.000001,1,100)
-y = x**(1)/100
-y2 = x**2/100
+x = np.linspace(1,1000,1000)
+y = 1/x*1/100
+y2 = 1/x**2
 plt.loglog(x,y,label = 'first order')
 plt.loglog(x,y2, label = 'second order')
+ax.legend()
 plt.axis(limits)
 plt.title('Max Errors Description'+analyticalCase)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'MaxError'+xlabel+'.png')
+plt.savefig(path+'/Descr'+analyticalCase+'MaxError'+xlabel+'.pdf')
 
 plot16 = plt.figure(16)
 sc1 = plt.scatter(xarray,GGErrors[0],label = methodnames[0])
@@ -553,16 +573,17 @@ ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
 ax.legend()
-x = np.linspace(0.000001,1,100)
-y = x**(1)/100
-y2 = x**2/100
+x = np.linspace(1,1000,1000)
+y = 1/x*1/100
+y2 = 1/x**2
 plt.loglog(x,y,label = 'first order')
 plt.loglog(x,y2, label = 'second order')
+ax.legend()
 plt.axis(limits)
-plt.title('GG Correction Error'+analyticalCase)
+plt.title('GG Correction Error Description '+analyticalCase)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'GG correction Error'+xlabel+'.png')
+plt.savefig(path+'/Descr'+analyticalCase+'GG correction Error'+xlabel+'.pdf')
 
 
 
@@ -572,7 +593,7 @@ upperX = 1e0
 lowerX = 5*1e-2
 upperY = 1e-1
 lowerY = 1e-6 
-xlabel = 'Max Connection length'
+xlabel = 'Max Cell length [m]'
 ylabel = 'Relative Magnitude Error'
 
 plot = plt.figure(17)
@@ -587,22 +608,23 @@ ax = plt.gca()
 ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
-ax.legend()
+
 x = np.linspace(0.000001,1,100)
-y = x**(1)/10
-y2 = x**2/10
+y = x**(1)/100
+y2 = x**2/100
 plt.loglog(x,y,label = 'first order')
 plt.loglog(x,y2, label = 'second order')
+ax.legend()
 plt.axis(limits)
 plt.title('Single Cell Error'+analyticalCase)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'SingleError.png')
+plt.savefig(path+'/Descr'+analyticalCase+'SingleError.pdf')
 
 
 upperX = 1e-1
 lowerX = 5*1e-3
-xlabel = 'Max Connection length excluding toroidal direction'
+xlabel = 'Max Cell length (excluding toroidal direction) [m]'
 plot = plt.figure(18)
 sc1 = plt.scatter(singledistancesnontor,SingleErrors[0],label = methodnames[0])
 sc2 = plt.scatter(singledistancesnontor,SingleErrors[1],label = methodnames[1])
@@ -615,17 +637,18 @@ ax = plt.gca()
 ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
-ax.legend()
+
 x = np.linspace(0.000001,1,100)
-y = x**(1)/10
-y2 = x**2/10
+y = x**(1)/100
+y2 = x**2/100
 plt.loglog(x,y, label='first order')
 plt.loglog(x,y2, label='second order')
 plt.axis(limits)
-plt.title('Single Cell Error Non Tor '+analyticalCase)
+ax.legend()
+plt.title('Single Cell Error Description '+analyticalCase)
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'Single Cell Error.png')
+plt.savefig(path+'/Descr'+analyticalCase+'Single Cell Error.pdf')
 
 
 
diff --git a/Scripts_Matthieu/ErrorAnalysisCorLen.py b/Scripts_Matthieu/ErrorAnalysisCorLen.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b19cdf18d9b793f90818a97d6aab0b6e654c4bf
--- /dev/null
+++ b/Scripts_Matthieu/ErrorAnalysisCorLen.py
@@ -0,0 +1,110 @@
+import math
+import driftFunctions
+import AnalyticalPrescriptions
+import Constants
+import Utilities
+import pandas as pd
+import matplotlib.pyplot as plt
+import PlottingFunctions
+import AnalysisFunctions
+import os
+import sys
+
+# Analyze the performance and errors of different methods
+# This script does this for given analytical prescriptions for potential and
+# magnetic field
+# Specify which analytical case is considered and which cases should be 
+# calculated
+analyticalCase = sys.argv[1]
+Cases = sys.argv[2:]
+path = os.getcwd()
+print(path)
+#print(Cases)
+case = ''
+methods = ['11', '12', '13', '14', '15'] 
+methodnames=['GG Cell-centered', 'LS Cell-centered','GG Nodal', 'LS Nodal', 'Mapping']
+corLevels = [1,2,4,8,16,32]
+corNames = ['1','2','3','4','5','6']
+noise='23'
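+# corLevels: imposed noise correlation lengths in cm (cf. the x-axis label below);
+# corNames: presumably the matching case-directory suffixes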
+# Define structures to be computed    
+efield_error_key = 'efield_er_rel_mag'
+exb_error_key = 'edrift_er_rel_mag'
+avErrors = []
+avErrorsD = []
+MaxErrors = []
+nbs = []
+
+for method in methods:
+    errors = []
+    errorsD = []
+    nbCells = []
+    maxErrors = []
+    for case in Cases:
+        print(case)
+        [caseInfo,caseInfoGB] = AnalysisFunctions.readAllInfo(method, analyticalCase,noise, case)
+        caseErrors = caseInfo.get(efield_error_key)
+        caseErrorsD = caseInfo.get(exb_error_key)
+        y = len(caseErrors)
+        errors.append(sum([x/y for x in caseErrors]))
+        errorsD.append(sum([x/y for x in caseErrorsD]))
+        nbCells.append(y)
+        maxErrors.append(max(caseErrors))
+	
+    avErrors.append(errors)
+    avErrorsD.append(errorsD)
+    MaxErrors.append(maxErrors)
+    nbs.append(nbCells)
+    
+print(avErrors)
+print(nbs) 
+print(corLevels)
+# Compute measure for gradient lengths and typical grid size
+typsize = 50*36*150
+#maxGL = 
+#avGL =  
+ 
+
+upperX = 40
+lowerX = -0.01   
+upperY = 10
+lowerY = 1e-5   
+plot1 = plt.figure(1)
+#v1 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+sc1 = plt.scatter(corLevels,avErrors[0],color = 'red',label = 'Average Error '+methodnames[0])
+sc2 = plt.scatter(corLevels,avErrors[1],color = 'green',label = 'Average Error '+methodnames[1])
+sc3 = plt.scatter(corLevels,avErrors[2],color = 'blue',label = 'Average Error '+methodnames[2]) 
+sc4 = plt.scatter(corLevels,avErrors[3],color = 'yellow',label = 'Average Error '+methodnames[3]) 
+sc5 = plt.scatter(corLevels,avErrors[4],color = 'black',label = 'Average Error '+methodnames[4]) 
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+ax.legend()
+plt.axis(limits)
+plt.title('Average Errors Description '+analyticalCase)
+plt.xlabel('Correlation Length [cm]')
+plt.ylabel('Relative Error')
+plt.savefig(path+'/Descr'+analyticalCase+'Error.pdf')
+
+
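+# Second figure: the same averages on a linear scale over a narrower error range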
+upperX = 40
+lowerX = -0.01   
+upperY = 1
+lowerY = 1e-2  
+plot1 = plt.figure(2)
+#v1 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+sc1 = plt.scatter(corLevels,avErrors[0],color = 'red',label = 'Average Error '+methodnames[0])
+sc2 = plt.scatter(corLevels,avErrors[1],color = 'green',label = 'Average Error '+methodnames[1])
+sc3 = plt.scatter(corLevels,avErrors[2],color = 'blue',label = 'Average Error '+methodnames[2]) 
+sc4 = plt.scatter(corLevels,avErrors[3],color = 'yellow',label = 'Average Error '+methodnames[3]) 
+sc5 = plt.scatter(corLevels,avErrors[4],color = 'black',label = 'Average Error '+methodnames[4]) 
+
+ax = plt.gca()
+limits = [lowerX, upperX, lowerY, upperY]
+ax.legend()
+plt.axis(limits)
+plt.title('Average Errors Description '+analyticalCase)
+plt.xlabel('Correlation Length [cm]')
+plt.ylabel('Relative Error')
+plt.savefig(path+'/Descr'+analyticalCase+'NoiseError.pdf')
+
+plt.show() 
diff --git a/Scripts_Matthieu/ErrorAnalysisLSQ.py b/Scripts_Matthieu/ErrorAnalysisLSQ.py
new file mode 100644
index 0000000000000000000000000000000000000000..73bed8e6fda77de68f88e90109329dc2465e3023
--- /dev/null
+++ b/Scripts_Matthieu/ErrorAnalysisLSQ.py
@@ -0,0 +1,651 @@
+import math
+import driftFunctions
+import AnalyticalPrescriptions
+import Constants
+import Utilities
+import pandas as pd
+import matplotlib.pyplot as plt
+import PlottingFunctions
+import AnalysisFunctions
+import os
+import sys
+import numpy as np
+
+# Analyze the performance and errors of different methods
+# This script does this for given analytical prescriptions for potential and
+# magnetic field
+# Specify which analytical case is considered and which cases should be 
+# calculated
+analyticalCase = sys.argv[1]
+Cases = sys.argv[2:]
+noise = '0'
+coords = '1'
+path = os.getcwd()
+print(path)
+print(Cases)
+methodnames = ['LS_q0', 'LS_q1', 'LS_q1.5', 'LS_q2', 'GG'] 
+methods=['30','31','31.5','32','1']
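+# LS_q*: least-squares gradient variants where the suffix presumably gives the
+# inverse-distance weighting exponent q; method '1' is the Green-Gauss reference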
+# Define structures to be computed    
+efield_error_key = 'efield_er_rel_mag'
+L2Errors = []
+MeanErrors = []
+SingleErrors = []
+MaxErrors = []
+GGErrors = []
+lens = []
+
+nbs = []
+r = 530
+phi = 0.15
+z = 45
+gg = 10
+
+for method in methods:
+
+#    errorsDF = pd.DataFrame()
+    errorsL2 = []
+    errorsMean = []
+    errorsSingle = []
+    nbCells = []
+    charLength = []
+    maxErrors = []
+    errorsGG = []
+    distances = []
+    distancesnontor = []
+    singledistances = []
+    singledistancesnontor = []
+    
+    for case in Cases:
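+        # Track the cell closest to the probe point (r, phi, z); siner/gg record its errors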
+        siner = 1000
+        dmin = 1000000000
+        distmax = 0
+        distnontormax = 0
+        print(case)
+        [caseInfo, caseInfoGB] = AnalysisFunctions.readAllInfo(method, analyticalCase, noise, case)
+        caseErrors = caseInfo.get(efield_error_key)
+        ggErrors = caseInfo.get('GGcorr')
+        positions = caseInfo.get('pos')
+#	errorsDF[case] = caseErrors
+        y = len(caseErrors)
+        errorsL2.append(math.sqrt(sum([x**2/y for x in caseErrors])))
+        errorsMean.append(sum([x for x in caseErrors])/y)
+        nbCells.append(y)
+        vol = AnalysisFunctions.readText(path+'/'+case+'Output'+method+'Descr'+analyticalCase+'Noise'+noise+'/VOLAV')
+        vol = float(vol[0])
+#        print(vol)
+        charLength.append((vol/10**6)**(1/3))
+        maxErrors.append(max(caseErrors))
+# Read geometrical data
+        geoData = AnalysisFunctions.readText(path+'/'+case+'Output'+method+'Descr'+analyticalCase+'Noise'+noise+'/NBS')	
+# Get single error
+        for i in range(len(caseErrors)):
+           ipos = positions[i]
+           d = (z-ipos[2])**2+(r-ipos[0])**2+(phi-ipos[1])**2
+           if d<dmin:
+               siner = caseErrors[i] 
+               gg = ggErrors[i]
+               dmin = d
+               singlepos = ipos
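+        # Scan the geometry records (5 lines per cell) for the largest cell extents, counting only non-boundary cells (bound == 0)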
+        for i in range(math.floor(len(geoData)/5)):
+            ipos = [float(x) for x in geoData[5*i+2].split()]
+            dis = [float(x) for x in geoData[5*i+3].split()] + [float(x) for x in geoData[5*i+4].split()]
+            bound = int(geoData[5*i+1])
+            dist = max(dis)
+#            print(bound, dist,distmax)
+#            print(bound == 0)
+#            print(type(bound))
+            distnontor = max(dis[0:4])
+            if dist > distmax and bound == 0:
+                distmax = dist
+            if distnontor > distnontormax and bound == 0:
+                distnontormax = distnontor
+            if np.array_equal(ipos,singlepos):
+                sindist = dist
+                sindistnontor = distnontor
+                
+        errorsSingle.append(siner)
+        errorsGG.append(gg)	
+        distances.append(distmax)
+        distancesnontor.append(distnontormax)
+        singledistances.append(sindist)
+        singledistancesnontor.append(sindistnontor)
+#        print(errors)
+#        print(maxErrors)
+#        print(nbCells)
+	
+    L2Errors.append(errorsL2)
+    MeanErrors.append(errorsMean)
+    SingleErrors.append(errorsSingle)
+    MaxErrors.append(maxErrors)
+    GGErrors.append(errorsGG)
+    nbs.append(nbCells)
+    lens.append(charLength)
+#    plot = plt.figure()
+#    sns.violinplot(data = case[methods[0]])
+#    plt.savefig(path+'/Descr'+analyticalCase+method+'Violin.png')
+print('L2',L2Errors)
+print('Mean', MeanErrors)
+print('Single', SingleErrors)
+print('Max',MaxErrors)
+print('GG',GGErrors)
+print(nbs) 
+print('distances', distances)
+print('distancesnontor', distancesnontor)
+print(singledistances)
+print(singledistancesnontor)
+print('char length', lens)
+# Compute measure for gradient lengths and typical grid size
+typsize = 50*36*150
+#maxGL = 
+#avGL =  
+ 
+
+upperX = 5*1e-1
+lowerX = 5*1e-3
+upperY = 1e-1
+lowerY = 1e-6   
+
+##########################################################
+# Characteristic Length
+
+
+xlabel = 'Characteristic Cell Length [m]'
+ylabel = 'Relative Magnitude Error'
+xarray = lens[0]
+
+
+plot1 = plt.figure(1)
+v1 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+sc1 = plt.scatter(xarray,MeanErrors[0],label = methodnames[0])
+sc2 = plt.scatter(xarray,MeanErrors[1],label = methodnames[1])
+sc3 = plt.scatter(xarray,MeanErrors[2],label = methodnames[2]) 
+sc4 = plt.scatter(xarray,MeanErrors[3],label = methodnames[3]) 
+sc5 = plt.scatter(xarray,MeanErrors[4],label = methodnames[4]) 
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+
+x = np.linspace(0.000001,1,100)
+y = x**(1)/10
+y2 = x**2/10
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
+ax.legend()
+plt.axis(limits)
+plt.title('Mean Errors Description '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'MeanError'+xlabel+'.pdf')
+
+plot2 = plt.figure(2)
+sc1 = plt.scatter(xarray,L2Errors[0],label = methodnames[0])
+sc2 = plt.scatter(xarray,L2Errors[1],label = methodnames[1])
+sc3 = plt.scatter(xarray,L2Errors[2],label = methodnames[2])
+sc4 = plt.scatter(xarray,L2Errors[3],label = methodnames[3])
+sc5 = plt.scatter(xarray,L2Errors[4],label = methodnames[4])
+
+v2 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+ax.legend()
+x = np.linspace(0.000001,1,100)
+y = x**(1)/100
+y2 = x**2/100
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
+plt.axis(limits)
+plt.title('L2 Errors Description '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'L2Error'+xlabel+'.pdf')
+
+
+plot3 = plt.figure(3)
+sc1 = plt.scatter(xarray,MaxErrors[0],label = 'Max Error '+methodnames[0])
+sc2 = plt.scatter(xarray,MaxErrors[1],label = 'Max Error '+methodnames[1])
+sc3 = plt.scatter(xarray,MaxErrors[2],label = 'Max Error '+methodnames[2])
+sc4 = plt.scatter(xarray,MaxErrors[3],label = 'Max Error '+methodnames[3])
+sc5 = plt.scatter(xarray,MaxErrors[4],label = 'Max Error '+methodnames[4])
+
+v2 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+
+x = np.linspace(0.000001,1,100)
+y = x**(1)/10
+y2 = x**2/10
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
+ax.legend()
+plt.axis(limits)
+plt.title('Max Errors Description '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'MaxError'+xlabel+'.pdf')
+
+plot4 = plt.figure(4)
+sc1 = plt.scatter(xarray,GGErrors[0],label = methodnames[0])
+sc2 = plt.scatter(xarray,GGErrors[1],label = methodnames[1])
+sc3 = plt.scatter(xarray,GGErrors[2],label = methodnames[2])
+sc4 = plt.scatter(xarray,GGErrors[3],label = methodnames[3])
+sc5 = plt.scatter(xarray,GGErrors[4],label = methodnames[4])
+
+v2 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+
+x = np.linspace(0.000001,1,100)
+y = x**(1)/10
+y2 = x**2/10
+or1 = plt.loglog(x,y,label = 'first order')
+or2 = plt.loglog(x,y2, label = 'second order')
+ax.legend()
+plt.axis(limits)
+plt.title('GG Correction Error Description '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'GG correction Error'+xlabel+'.pdf')
+
+
+xlabel = 'Max Cell Length [m]'
+ylabel = 'Relative Magnitude Error'
+xarray = distances
+upperX = 1e0
+lowerX = 5*1e-2
+
+######################################################################
+# Max distance
+
+plot5 = plt.figure(5)
+v1 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+sc1 = plt.scatter(xarray,MeanErrors[0],label = methodnames[0])
+sc2 = plt.scatter(xarray,MeanErrors[1],label = methodnames[1])
+sc3 = plt.scatter(xarray,MeanErrors[2],label = methodnames[2]) 
+sc4 = plt.scatter(xarray,MeanErrors[3],label = methodnames[3]) 
+sc5 = plt.scatter(xarray,MeanErrors[4],label = methodnames[4]) 
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+
+x = np.linspace(0.000001,1,100)
+y = x**(1)/400
+y2 = x**2/400
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
+ax.legend()
+plt.axis(limits)
+plt.title('Mean Errors Description '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'MeanError'+xlabel+'.pdf')
+
+plot6 = plt.figure(6)
+sc1 = plt.scatter(xarray,L2Errors[0],label = methodnames[0])
+sc2 = plt.scatter(xarray,L2Errors[1],label = methodnames[1])
+sc3 = plt.scatter(xarray,L2Errors[2],label = methodnames[2])
+sc4 = plt.scatter(xarray,L2Errors[3],label = methodnames[3])
+sc5 = plt.scatter(xarray,L2Errors[4],label = methodnames[4])
+
+v2 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+
+x = np.linspace(0.000001,1,100)
+y = x**(1)/100
+y2 = x**2/100
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
+ax.legend()
+plt.axis(limits)
+plt.title('L2 Errors Description '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'L2Error'+xlabel+'.pdf')
+
+
+plot7 = plt.figure(7)
+sc1 = plt.scatter(xarray,MaxErrors[0],label = 'Max Error '+methodnames[0])
+sc2 = plt.scatter(xarray,MaxErrors[1],label = 'Max Error '+methodnames[1])
+sc3 = plt.scatter(xarray,MaxErrors[2],label = 'Max Error '+methodnames[2])
+sc4 = plt.scatter(xarray,MaxErrors[3],label = 'Max Error '+methodnames[3])
+sc5 = plt.scatter(xarray,MaxErrors[4],label = 'Max Error '+methodnames[4])
+
+v2 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+
+x = np.linspace(0.000001,1,100)
+y = x**(1)/200
+y2 = x**2/200
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
+plt.axis(limits)
+ax.legend()
+plt.title('Max Errors Description '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'MaxError'+xlabel+'.pdf')
+
+plot8 = plt.figure(8)
+sc1 = plt.scatter(xarray,GGErrors[0],label = methodnames[0])
+sc2 = plt.scatter(xarray,GGErrors[1],label = methodnames[1])
+sc3 = plt.scatter(xarray,GGErrors[2],label = methodnames[2])
+sc4 = plt.scatter(xarray,GGErrors[3],label = methodnames[3])
+sc5 = plt.scatter(xarray,GGErrors[4],label = methodnames[4])
+
+v2 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+
+x = np.linspace(0.000001,1,100)
+y = x**(1)/100
+y2 = x**2/100
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
+plt.axis(limits)
+ax.legend()
+plt.title('GG Correction Error Description '+analyticalCase+' '+xlabel)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'GG correction Error'+xlabel+'.pdf')
+
+############################################################################
+# Max length, non toroidal
+xlabel = 'Max Length (excluding toroidal direction) [m]'
+ylabel = 'Relative Magnitude Error'
+xarray = distancesnontor
+
+upperX = 5*1e-1
+lowerX = 5*1e-3 
+
+plot9 = plt.figure(9)
+v1 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+sc1 = plt.scatter(xarray,MeanErrors[0],label = methodnames[0])
+sc2 = plt.scatter(xarray,MeanErrors[1],label = methodnames[1])
+sc3 = plt.scatter(xarray,MeanErrors[2],label = methodnames[2]) 
+sc4 = plt.scatter(xarray,MeanErrors[3],label = methodnames[3]) 
+sc5 = plt.scatter(xarray,MeanErrors[4],label = methodnames[4]) 
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+
+x = np.linspace(0.000001,1,100)
+y = x**(1)/10
+y2 = x**2/10
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
+plt.axis(limits)
+ax.legend()
+plt.title('Mean Errors Description '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'MeanError'+xlabel+'.png')
+
+plot10 = plt.figure(10)
+sc1 = plt.scatter(xarray,L2Errors[0],label = methodnames[0])
+sc2 = plt.scatter(xarray,L2Errors[1],label = methodnames[1])
+sc3 = plt.scatter(xarray,L2Errors[2],label = methodnames[2])
+sc4 = plt.scatter(xarray,L2Errors[3],label = methodnames[3])
+sc5 = plt.scatter(xarray,L2Errors[4],label = methodnames[4])
+
+v2 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+
+x = np.linspace(0.000001,1,100)
+y = x**(1)/100
+y2 = x**2/100
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
+plt.axis(limits)
+ax.legend()
+plt.title('L2 Errors Description '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'L2Error'+xlabel+'.png')
+
+
+plot11 = plt.figure(11)
+sc1 = plt.scatter(xarray,MaxErrors[0],label = 'Max Error '+methodnames[0])
+sc2 = plt.scatter(xarray,MaxErrors[1],label = 'Max Error '+methodnames[1])
+sc3 = plt.scatter(xarray,MaxErrors[2],label = 'Max Error '+methodnames[2])
+sc4 = plt.scatter(xarray,MaxErrors[3],label = 'Max Error '+methodnames[3])
+sc5 = plt.scatter(xarray,MaxErrors[4],label = 'Max Error '+methodnames[4])
+
+v2 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+
+x = np.linspace(0.000001,1,100)
+y = x**(1)/10
+y2 = x**2/10
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
+plt.axis(limits)
+ax.legend()
+plt.title('Max Errors Description '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'MaxError'+xlabel+'.png')
+
+plot12 = plt.figure(12)
+sc1 = plt.scatter(xarray,GGErrors[0],label = methodnames[0])
+sc2 = plt.scatter(xarray,GGErrors[1],label = methodnames[1])
+sc3 = plt.scatter(xarray,GGErrors[2],label = methodnames[2])
+sc4 = plt.scatter(xarray,GGErrors[3],label = methodnames[3])
+sc5 = plt.scatter(xarray,GGErrors[4],label = methodnames[4])
+
+v2 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+
+x = np.linspace(0.000001,1,100)
+y = x**(1)/10
+y2 = x**2/10
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
+ax.legend()
+plt.axis(limits)
+plt.title('GG Correction Error Description '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'GG correction Error'+xlabel+'.png')
+
+###########################################################################
+# Nb of cells
+xlabel = 'Number of cells'
+ylabel = 'Relative Magnitude Error'
+xarray = nbs[0]
+upperX = 1e7
+lowerX = 1000
+upperY = 1e-1
+lowerY = 1e-6 
+
+
+plot13 = plt.figure(13)
+v1 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+sc1 = plt.scatter(xarray,MeanErrors[0],label = methodnames[0])
+sc2 = plt.scatter(xarray,MeanErrors[1],label = methodnames[1])
+sc3 = plt.scatter(xarray,MeanErrors[2],label = methodnames[2]) 
+sc4 = plt.scatter(xarray,MeanErrors[3],label = methodnames[3]) 
+sc5 = plt.scatter(xarray,MeanErrors[4],label = methodnames[4]) 
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+ax.legend()
+x = np.linspace(0.000001,1,100)
+y = x**(1)/100
+y2 = x**2/100
+plt.loglog(x,y)
+plt.loglog(x,y2)
+plt.axis(limits)
+plt.title('Mean Errors Description '+analyticalCase+' '+xlabel)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'MeanError'+xlabel+'.png')
+
+plot14 = plt.figure(14)
+sc1 = plt.scatter(xarray,L2Errors[0],label = methodnames[0])
+sc2 = plt.scatter(xarray,L2Errors[1],label = methodnames[1])
+sc3 = plt.scatter(xarray,L2Errors[2],label = methodnames[2])
+sc4 = plt.scatter(xarray,L2Errors[3],label = methodnames[3])
+sc5 = plt.scatter(xarray,L2Errors[4],label = methodnames[4])
+
+v2 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+ax.legend()
+x = np.linspace(0.000001,1,100)
+y = x**(1)/100
+y2 = x**2/100
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
+plt.axis(limits)
+plt.title('L2 Errors Description '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'L2Error'+xlabel+'.png')
+
+
+plot15 = plt.figure(15)
+sc1 = plt.scatter(xarray,MaxErrors[0],label = 'Max Error '+methodnames[0])
+sc2 = plt.scatter(xarray,MaxErrors[1],label = 'Max Error '+methodnames[1])
+sc3 = plt.scatter(xarray,MaxErrors[2],label = 'Max Error '+methodnames[2])
+sc4 = plt.scatter(xarray,MaxErrors[3],label = 'Max Error '+methodnames[3])
+sc5 = plt.scatter(xarray,MaxErrors[4],label = 'Max Error '+methodnames[4])
+
+v2 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+ax.legend()
+x = np.linspace(0.000001,1,100)
+y = x**(1)/100
+y2 = x**2/100
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
+plt.axis(limits)
+plt.title('Max Errors Description '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'MaxError'+xlabel+'.png')
+
+plot16 = plt.figure(16)
+sc1 = plt.scatter(xarray,GGErrors[0],label = methodnames[0])
+sc2 = plt.scatter(xarray,GGErrors[1],label = methodnames[1])
+sc3 = plt.scatter(xarray,GGErrors[2],label = methodnames[2])
+sc4 = plt.scatter(xarray,GGErrors[3],label = methodnames[3])
+sc5 = plt.scatter(xarray,GGErrors[4],label = methodnames[4])
+
+v2 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+ax.legend()
+x = np.linspace(0.000001,1,100)
+y = x**(1)/100
+y2 = x**2/100
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
+plt.axis(limits)
+plt.title('GG Correction Error Description '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'GG correction Error'+xlabel+'.png')
+
+
+
+############################################################################
+# Single Cell
+upperX = 1e0
+lowerX = 5*1e-2
+upperY = 1e-1
+lowerY = 1e-6 
+xlabel = 'Max Cell length [m]'
+ylabel = 'Relative Magnitude Error'
+
+plot = plt.figure(17)
+sc1 = plt.scatter(singledistances,SingleErrors[0],label = methodnames[0])
+sc2 = plt.scatter(singledistances,SingleErrors[1],label = methodnames[1])
+sc3 = plt.scatter(singledistances,SingleErrors[2],label = methodnames[2])
+sc4 = plt.scatter(singledistances,SingleErrors[3],label = methodnames[3])
+sc5 = plt.scatter(singledistances,SingleErrors[4],label = methodnames[4])
+
+v2 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+
+x = np.linspace(0.000001,1,100)
+y = x**(1)/100
+y2 = x**2/100
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
+ax.legend()
+plt.axis(limits)
+plt.title('Single Cell Error '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'SingleError.png')
+
+
+upperX = 1e-1
+lowerX = 5*1e-3
+xlabel = 'Max Cell length (excluding toroidal direction) [m]'
+plot = plt.figure(18)
+sc1 = plt.scatter(singledistancesnontor,SingleErrors[0],label = methodnames[0])
+sc2 = plt.scatter(singledistancesnontor,SingleErrors[1],label = methodnames[1])
+sc3 = plt.scatter(singledistancesnontor,SingleErrors[2],label = methodnames[2])
+sc4 = plt.scatter(singledistancesnontor,SingleErrors[3],label = methodnames[3])
+sc5 = plt.scatter(singledistancesnontor,SingleErrors[4],label = methodnames[4])
+
+v2 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+
+x = np.linspace(0.000001,1,100)
+y = x**(1)/100
+y2 = x**2/100
+plt.loglog(x,y, label='first order')
+plt.loglog(x,y2, label='second order')
+plt.axis(limits)
+ax.legend()
+plt.title('Single Cell Error Description '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'Single Cell Error.png')
+
+
+
+
+
+plt.show()
diff --git a/Scripts_Matthieu/ErrorAnalysisNoise.py b/Scripts_Matthieu/ErrorAnalysisNoise.py
index f062101b3b1e0f19c386a1f720bf31cc69181ed9..ef9ba7e0e0ca54f65adf5b8a496f7ff589bce2f7 100644
--- a/Scripts_Matthieu/ErrorAnalysisNoise.py
+++ b/Scripts_Matthieu/ErrorAnalysisNoise.py
@@ -9,6 +9,7 @@ import PlottingFunctions
 import AnalysisFunctions
 import os
 import sys
+plt.rcParams.update({'font.size': 12})
 
 # Analyze the performance and errors of different methods
 # This script does this for given analytical prescriptions for potential and
@@ -21,9 +22,10 @@ path = os.getcwd()
 print(path)
 #print(Cases)
 case = ''
-methods = ['1', '2', '3', '4'] 
-noiseLevels = [0,0.005,0.01,0.015,0.02,0.025,0.03,0.035]
-noiseNames = ['0','11','12','13','14', '15','16','17']
+methods = ['11', '12', '13', '14', '15'] 
+methodnames=['GG Cell-centered', 'LS Cell-centered','GG Nodal', 'LS Nodal', 'Mapping']
+noiseLevels = [0,0.3,0.6,0.9,1.2,1.5]
+noiseNames = ['0','21','22', '23', '24', '25']
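+# noiseNames: directory suffixes matching the noise SD levels in noiseLevels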
 
 # Define structures to be computed    
 efield_error_key = 'efield_er_rel_mag'
@@ -62,21 +64,17 @@ typsize = 50*36*150
 #avGL =  
  
 
-upperX = 0.20
+upperX = 2
 lowerX = -0.01   
-upperY = 1
+upperY = 10
 lowerY = 1e-5   
 plot1 = plt.figure(1)
 #v1 = plt.vlines(typsize,lowerY,upperY, color = 'g')
-sc1 = plt.scatter(noiseLevels,avErrors[0],color = 'red',label = 'Average Error '+methods[0])
-sc2 = plt.scatter(noiseLevels,avErrors[1],color = 'green',label = 'Average Error '+methods[1])
-sc3 = plt.scatter(noiseLevels,avErrors[2],color = 'blue',label = 'Average Error '+methods[2]) 
-sc4 = plt.scatter(noiseLevels,avErrors[3],color = 'yellow',label = 'Average Error '+methods[3]) 
-sc5 = plt.scatter(noiseLevels,avErrorsD[0],color = 'maroon',label = 'Average Error ExB '+methods[0])
-sc6 = plt.scatter(noiseLevels,avErrorsD[1],color = 'darkgreen',label = 'Average Error ExB'+methods[1])
-sc7 = plt.scatter(noiseLevels,avErrorsD[2],color = 'navy',label = 'Average Error ExB'+methods[2]) 
-sc8 = plt.scatter(noiseLevels,avErrorsD[3],color = 'gold',label = 'Average Error ExB'+methods[3])
-#sc5 = plt.scatter(nbs[1],avErrors[4],color = 'red',label = 'Average Error '+methods[4]) 
+sc1 = plt.scatter(noiseLevels,avErrors[0],color = 'red',label = 'Average Error '+methodnames[0])
+sc2 = plt.scatter(noiseLevels,avErrors[1],color = 'green',label = 'Average Error '+methodnames[1])
+sc3 = plt.scatter(noiseLevels,avErrors[2],color = 'blue',label = 'Average Error '+methodnames[2]) 
+sc4 = plt.scatter(noiseLevels,avErrors[3],color = 'yellow',label = 'Average Error '+methodnames[3]) 
+sc5 = plt.scatter(noiseLevels,avErrors[4],color = 'black',label = 'Average Error '+methodnames[4]) 
 ax = plt.gca()
 ax.set_yscale('log')
 #ax.set_xscale('log')
@@ -86,6 +84,28 @@ plt.axis(limits)
 plt.title('Average Errors Description'+analyticalCase)
 plt.xlabel('Noise SD Measure')
 plt.ylabel('Relative Error')
-plt.savefig(path+'/Descr'+analyticalCase+'Error.png')
+plt.savefig(path+'/Descr'+analyticalCase+'Error.pdf')
+
+
+upperX = 2
+lowerX = -0.01   
+upperY = 1
+lowerY = 1e-2  
+plot1 = plt.figure(2)
+#v1 = plt.vlines(typsize,lowerY,upperY, color = 'g')
+sc1 = plt.scatter(noiseLevels,avErrors[0],color = 'red',label = 'Average Error '+methodnames[0])
+sc2 = plt.scatter(noiseLevels,avErrors[1],color = 'green',label = 'Average Error '+methodnames[1])
+sc3 = plt.scatter(noiseLevels,avErrors[2],color = 'blue',label = 'Average Error '+methodnames[2]) 
+sc4 = plt.scatter(noiseLevels,avErrors[3],color = 'yellow',label = 'Average Error '+methodnames[3]) 
+sc5 = plt.scatter(noiseLevels,avErrors[4],color = 'black',label = 'Average Error '+methodnames[4]) 
+ax = plt.gca()
+
+limits = [lowerX, upperX, lowerY, upperY]
+ax.legend()
+plt.axis(limits)
+plt.title('Average Errors Description '+analyticalCase)
+plt.xlabel('Noise SD Measure')
+plt.ylabel('Relative Error')
+plt.savefig(path+'/Descr'+analyticalCase+'NoiseError.pdf')
 
 plt.show() 
diff --git a/Scripts_Matthieu/ErrorAnalysisResolutionNoise.py b/Scripts_Matthieu/ErrorAnalysisResolutionNoise.py
new file mode 100644
index 0000000000000000000000000000000000000000..b996f9fbb73e647f434c937694d71ce2fa8c6f98
--- /dev/null
+++ b/Scripts_Matthieu/ErrorAnalysisResolutionNoise.py
@@ -0,0 +1,133 @@
+import math
+import driftFunctions
+import AnalyticalPrescriptions
+import Constants
+import Utilities
+import pandas as pd
+import matplotlib.pyplot as plt
+import PlottingFunctions
+import AnalysisFunctions
+import os
+import sys
+import numpy as np
+import matplotlib
+plt.rcParams.update({'font.size': 12})
+# Analyze the performance and errors of different methods
+# This script does this for given analytical prescriptions for potential and
+# magnetic field
+# Specify which analytical case is considered and which cases should be 
+# calculated
+analyticalCase = sys.argv[1]
+Cases = sys.argv[2:]
+noise = '23'
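+# Noise case '23' corresponds to an SD measure of 0.9 (see the mapping in ErrorAnalysisNoise.py)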
+path = os.getcwd()
+print(path)
+print(Cases)
+methodnames = ['GG Cell-centered', 'LS Cell-centered', 'GG Nodal', 'LS Nodal','Mapping']#, 'GG exact surfaces'] 
+methods = ['1','2','3','4','5']#, '6']
+# Define structures to be computed    
+efield_error_key = 'efield_er_rel_mag'
+L2Errors = []
+MeanErrors = []
+lens = []
+
+nbs = []
+r = 530
+phi = 0.15
+z = 45
+gg = 10
+for method in methods:
+#    errorsDF = pd.DataFrame()
+    errorsL2 = []
+    errorsMean = []
+    charLength = []
+
+    
+    for case in Cases:
+        print(case)
+        [caseInfo, caseInfoGB] = AnalysisFunctions.readAllInfo(method, analyticalCase, noise, case)
+        caseErrors = caseInfo.get(efield_error_key)
+#	errorsDF[case] = caseErrors
+        y = len(caseErrors)
+        errorsL2.append(math.sqrt(sum([x**2/y for x in caseErrors])))
+        errorsMean.append(sum([x for x in caseErrors])/y)
+        vol = AnalysisFunctions.readText(path+'/'+case+'Output'+method+'Descr'+analyticalCase+'Noise'+noise+'/VOLAV')
+        vol = float(vol[0])
+#        print(vol)
+        charLength.append((vol/10**6)**(1/3))
+
+    L2Errors.append(errorsL2)
+    MeanErrors.append(errorsMean)
+    lens.append(charLength)
+
+print('L2',L2Errors)
+print('Mean', MeanErrors)
+print('char length', lens)
+
+upperX = 5*1e-1
+lowerX = 5*1e-3
+upperY = 1
+lowerY = 1e-2   
+
+##########################################################
+# Characteristic Length
+
+
+xlabel = 'Characteristic Cell Length [m]'
+ylabel = 'Relative Magnitude Error'
+xarray = lens[0]
+
+
+plot1 = plt.figure(1)
+sc1 = plt.scatter(xarray,MeanErrors[0],label = methodnames[0])
+sc2 = plt.scatter(xarray,MeanErrors[1],label = methodnames[1])
+sc3 = plt.scatter(xarray,MeanErrors[2],label = methodnames[2]) 
+sc4 = plt.scatter(xarray,MeanErrors[3],label = methodnames[3]) 
+sc5 = plt.scatter(xarray,MeanErrors[4],label = methodnames[4]) 
+#sc6 = plt.scatter(xarray,MeanErrors[5],label = methodnames[5])
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+
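+# Negative-slope guides: with fixed-amplitude noise the gradient error grows as
+# the cells shrink, roughly as h**(-1/2), h**(-1) and h**(-2)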
+x = np.linspace(0.000001,1,100)
+y = x**(-1)/100
+y2 = x**(-2)/100
+y12 = x**(-1/2)/100*3
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
+plt.loglog(x,y12, label = 'half order')
+ax.legend()
+plt.axis(limits)
+plt.title('Mean Errors Description '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'MeanError'+xlabel+'.pdf')
+
+plot2 = plt.figure(2)
+sc1 = plt.scatter(xarray,L2Errors[0],label = methodnames[0])
+sc2 = plt.scatter(xarray,L2Errors[1],label = methodnames[1])
+sc3 = plt.scatter(xarray,L2Errors[2],label = methodnames[2])
+sc4 = plt.scatter(xarray,L2Errors[3],label = methodnames[3])
+sc5 = plt.scatter(xarray,L2Errors[4],label = methodnames[4])
+
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+x = np.linspace(0.000001,1,100)
+# Reference convergence lines: error ~ h^p, with arbitrary vertical offsets;
+# the legend must be drawn after these lines so they appear in it
+y = x*2
+y2 = x**2*4
+y12 = x**(1/2)*1.4
+plt.loglog(x,y,label = 'first order')
+plt.loglog(x,y2, label = 'second order')
+plt.loglog(x,y12, label = 'half order')
+ax.legend()
+plt.axis(limits)
+plt.title('L2 Errors Description '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'L2Error'+xlabel+'.pdf')
+
+
+plt.show()
diff --git a/Scripts_Matthieu/ExtractNoiseInfo.py b/Scripts_Matthieu/ExtractNoiseInfo.py
index 9ec01cab5ce866aa9b00b5e99f36adfef2d76e0a..91170c3b6c5639b47850674e0cc24ba63988a22d 100644
--- a/Scripts_Matthieu/ExtractNoiseInfo.py
+++ b/Scripts_Matthieu/ExtractNoiseInfo.py
@@ -1,28 +1,40 @@
 import numpy as np
 import os
 import matplotlib.pyplot as plt
-
-
+import driftFunctions
+import AnalyticalPrescriptions
+import Constants
+import Utilities
+import pandas as pd
+import PlottingFunctions
+import AnalysisFunctions
+import sys
+from scipy import sparse
+import scipy.optimize as optimize
+import math
+import matplotlib
+import statistics
+from scipy import stats
 path = os.getcwd()
+sys.path.append('../../emc3-grid-generator')
+from gg import GG
 
 xnames = ['_1','_2','_3','_4','_5','_6','_7','_8','_9']#,'_10']
 Tdata = []
 
+def monexp(x,L):
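+    # exponential correlation model exp(-x/L); the correlation length L is
+    # fitted to the measured correlation coefficients below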
+    return np.exp(-x/L)
+
+
 for i in range(len(xnames)):
-    file = open('/u/mbj/Drifts/data-for-drift-computations/Reference-FullRes/X-TEST-HR/x-out'+xnames[i]+'/x-out/TE_TI','r')
-    Temptemp = file.readlines()
+    file = open('/u/mbj/Drifts/data-for-drift-computations/Reference-FullRes2/X-TEST-HR/x-out'+xnames[i]+'/x-out/TE_TI','r')
+    Temptemp = [r.split() for r in file.readlines()]
     Temp = []
-    for i in range(len(Temptemp)):
-        Temp = Temp+[float(x) for x in Temptemp[i].split()]
-#    print(Temp)
-    print(len(Temp))
-    Temp = np.array(Temp)
-#    Temp = np.loadtxt('/u/mbj/Drifts/data-for-drift-computations/Reference-FullRes/X-TEST-HR/x-out'+xnames[i]+'/x-out/TE_TI', max_rows = 25851)
-#    Temp =np.concatenate(Temp.flatten(),np.loadtxt('/u/mbj/Drifts/data-for-drift-computations/Reference-FullRes/X-TEST-HR/x-out'+xnames[i]+'/x-out/TE_TI',skiprows=25851,max_rows = 1))
-#    Tlength = len(Temp)
-#    print(Tlength)
-#    Temp.flatten()
-    Tdata.append(Temp)
+    for i in Temptemp:
+        Temp.extend(i)
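+    # TE_TI stores electron temperatures followed by ion temperatures;
+    # keep only the electron half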
+    Tempel = Temp[0:int(len(Temp)/2)]
+    Tempar = np.array(Tempel, dtype=float)
+    Tdata.append(Tempar)
 
 
 avTemp = Tdata[0]    
@@ -30,42 +42,325 @@ for i in range(1,len(xnames)):
     print(Tdata[i][50000:50010])
     avTemp = np.add(avTemp,Tdata[i])
 avTemp = avTemp*1/len(xnames)
-print(avTemp[100000:100010])
 
 difData = []  
 normdifData = []  
+variance = []
 for i in range(len(xnames)):
-    difTemp = np.absolute(np.subtract(avTemp,Tdata[i]))
-    normdifTemp = np.divide(difTemp,avTemp)
+    difTemp = np.subtract(avTemp,Tdata[i])
+#    normdifTemp = np.divide(difTemp,avTemp)
+    var = np.square(difTemp)
+    variance.append(var)
     difData.append(difTemp)
-    normdifData.append(normdifTemp)
+#    normdifData.append(normdifTemp) 
     
 avDif = difData[0]    
+avVar = variance[0]
 for i in range(1,len(xnames)):
     avDif = np.add(avDif,difData[i])
+    avVar = np.add(avVar,variance[i])
 avDif = avDif*1/len(xnames)
+avVar = avVar*1/len(xnames)
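+# avVar is the per-cell sample variance of the temperature across the runs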
+print('average variance', statistics.mean(avVar))
 
-#covMat = np.outer(avDif,avDif) #np.empty((size,size))
-#for i in range(size):
-#    for j in range(size):
-#        covMat[i,j] = avDif[i]*avDif[j]*len(xnames)
-	
-
-avError = np.average(avDif)
-print('avError', avError)
-print(avDif)
-print(avTemp)
+median = statistics.median(avVar)
+print('median Var', median)
+# keep only cells whose variance is below three times the median; a filter
+# object is lazy, so materialize it as a list
+filvar = [var for var in avVar if var < 3*median]
 
 plot1 = plt.figure(1)
 plt.xlabel('Temperature')
 plt.ylabel('Variance')
-sc1 = plt.scatter(avTemp,avDif)
+plt.title('Temperature vs Variance')
+sc1 = plt.scatter(avTemp,avVar)
 #plt.colorbar(sc1)
 
 
 plot2 = plt.figure(2)
 plt.xlabel('Av Temperature')
 plt.ylabel('Temp')
+plt.title('Temperature vs Av Temp')
 sc1 = plt.scatter(avTemp,Tdata[1])
 #plt.colorbar(sc1)
+# plot histogram of variance
+plot3 = plt.figure(3)
+plt.hist(avVar,1000, density=True)
+plt.title('Histogram of Variance')
+
+# Plot variance in space
+plot4 = plt.figure(4)
+plt.xlabel('Radial Position')
+plt.ylabel('Vertical Position')
+posdata = AnalysisFunctions.readText(path+'/Output1Descr6Noise0/DRIFT_POS')
+# Load position data
+geoData = AnalysisFunctions.readText('/u/mbj/Drifts/data-for-drift-computations/Reference-FullRes2/Output5Descr6Noise0/NBS')
+volumes =np.array(AnalysisFunctions.readText('/u/mbj/Drifts/data-for-drift-computations/Reference-FullRes2/Output3Descr3Noise0/VOLUMES'),dtype=float)
+
+volfil=[]
+varfil=[]
+for i in range(len(volumes)):
+      if avVar[i]<50:
+          varfil.append(avVar[i])
+          volfil.append(volumes[i])
+
+plot7 = plt.figure(7)
+# Creating plot
+plt.hist2d(volfil, varfil, bins=1000,norm=matplotlib.colors.LogNorm())
+plt.colorbar()
+plt.title("Variance vs Volume")
+
+
+
+########### Get covariance data => Try to fit correlation length model, looking at individual surfaces ##############################
+# Get single error
+limits = AnalysisFunctions.readText(path+'/Output1Descr6Noise0/LIMITS')
+dim = math.floor(len(geoData)/5)
+TS = [15]
+RS=[34]
+#RS=[60,61,62,63,64]
+PS =  [100]
+choice = 1
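+# choice selects the scan direction: 1 varies the poloidal index (toroidal and
+# radial fixed), 2 the radial index, anything else the toroidal index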
+positions = []
+difvalues=[]
+variances =[]
+indices=[]
+vols=[]
+print(len(difData))
+print(len(difData[0]))
+print(dim)
+print(int(len(limits)/4))
+for i in range(dim):
+    coords =[float(x) for x in geoData[5*i+2].split()]
+    limT = [float(x) for x in limits[4*i+1].split()]
+    limP = [float(x) for x in limits[4*i+2].split()]
+    limR = [float(x) for x in limits[4*i+3].split()]
+    if choice==1:
+        if limT[0] in TS and limR[0] in RS:
+            positions.append(coords)
+            difvalues.append([r[i] for r in difData])
+            indices.append(limP[0])
+    elif choice == 2:
+        if limT[0] in TS and limP[0] in PS:
+            positions.append(coords)
+            difvalues.append([r[i] for r in difData])
+    else:
+        if limR[0] in RS and limP[0] in PS:
+            positions.append(coords)
+            difvalues.append([r[i] for r in difData])    
+            indices.append(limT[0])
+    
+posArray = np.array(positions)
+difArray = np.array(difvalues)
+print(posArray)
+print(difArray)
+print(posArray.shape)
+
+covmat = np.cov(difArray)
+cormat = np.corrcoef(difArray)
+cor1D = cormat[0]
+cor1Dn = cormat[30]
+cov1D = covmat[0]
+print(cor1D)
+distances = []
+poldistances = []
+polangles = []
+#indices=np.linspace(0,len(cor1D),len(cor1D))
+for i in range(len(cor1D)):
+    dis = min(math.sqrt((positions[i][0]-positions[0][0])**2+(positions[i][2]-positions[0][2])**2+((positions[i][0]+positions[0][0])/2*abs(positions[i][1]-positions[0][1]))**2), \
+    math.sqrt((positions[i][0]-positions[35][0])**2+(positions[i][2]-positions[35][2])**2+((positions[i][0]+positions[35][0])/2*abs(positions[i][1]-positions[35][1]))**2))
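+    # approximate 3D distance in cylindrical coordinates (R*dphi arc for the
+    # toroidal part); the minimum over the two reference cells presumably
+    # accounts for the periodicity of the ring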
+#    poldis = abs(math.atan2(positions[i][2],positions[i][0]-580)-math.atan2(positions[0][2],positions[0][0]-580))*(math.sqrt((positions[i][0]-580)**2+(positions[i][2])**2)+math.sqrt((positions[0][0]-580)**2+(positions[0][2])**2))/2
+#    polangle = math.atan2(positions[i][2],positions[i][0]-580)
+    distances.append(dis)
+#    poldistances.append(poldis)
+#    polangles.append(polangle)
+
+plot=plt.figure(20)
+r = [r[0] for r in positions]
+z = [r[2] for r in positions]
+
+plt.title('Spatial Distribution of Considered Points')
+grid = GG()
+dat = grid.read_mesh3d(fn='Inputs/GEOMETRY_3D_DATA')
+grid.plot_mesh3d(dat,TS[0])
+sc1 = plt.scatter(r,z,color='r')
+for i in range(len(r)):
+    plt.plot(r[i],z[i],"or")
+xl = indices
+xlab='Grid Indices'
+plot5 = plt.figure(5)
+sc1 = plt.scatter(xl,cov1D)
+plt.title('Covariance')
+plt.xlabel(xlab)
+plt.ylabel('Covariance')
+plot5 = plt.figure(6)
+sc1 = plt.scatter(xl,cor1D)
+#sc1 = plt.scatter(xl,cor1Dn)
+plt.xlabel(xlab)
+plt.ylabel('Correlation Coefficient')
+plt.title('Correlation with Starting Cell')
+plot5 = plt.figure(8)
+sc1 = plt.scatter(xl,cor1D)
+plt.xlabel(xlab)
+plt.ylabel('Correlation Coefficient')
+plt.title('Correlation with Starting Cell')
+
+
+
+corlen=25
+x = np.array(xl)
+y=np.array(cor1D)
+m, b = np.polyfit(x, y, 1)
+print(m,b,'linear fit')
+cv,p=stats.pearsonr(x,y)
+print(cv,p)
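+# fit the exponential correlation model; popt[0] is the correlation length in
+# grid-index units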
+popt,pcov=optimize.curve_fit(monexp,x,y)
+print('popt',popt,'pcov',pcov)
+plt.plot(x,monexp(x,*popt), label='exponential fit')
+plt.plot(x,m*x+b,'--k',label='linear fit')
+plt.legend()
+
+
+plt.show()
+
+#############################################################################################################
+
+# Get single error
+positions =[]
+variances =[]
+vols=[]
+for i in range(len(posdata)):
+    pos = [float(x) for x in posdata[i].split()]
+    if i==0:
+        torpos = pos[1]
+    if pos[1] == torpos and avVar[i] < 3*median and avVar[i]>median/10:
+        positions.append(pos)
+        variances.append(avVar[i])
+        vols.append(volumes[i])
+r = [x[0] for x in positions]
+z = [x[2] for x in positions]
+
+plt.figure(4)  # reactivate the spatial-variance figure prepared above
+sc1 = plt.scatter(r,z,c=variances, cmap='plasma', norm=matplotlib.colors.LogNorm())
+plt.colorbar(sc1)
+plt.title('Variances filtered')
+
+plot = plt.figure(12)
+sc1 = plt.scatter(r,z,c=vols, cmap='plasma', norm=matplotlib.colors.LogNorm())    
+plt.colorbar(sc1)
+plt.title('Cell volumes')
+
+ 
+dim = math.floor(len(geoData)/5)
+print(dim)
+boundary = np.empty(dim)
+neighbours = np.empty((dim,6))
+coords = np.empty((dim,3))
+distances = np.empty((dim,6))
+
+for i in range(dim):
+    boundary[i] = geoData[5*i+1]
+    neighbours[i][:] = [int(x) for x in geoData[5*i].split()]
+    coords[i][:] =[float(x) for x in geoData[5*i+2].split()]
+    distances[i][:] = [float(x) for x in geoData[5*i+3].split()]+[float(x) for x in geoData[5*i+4].split()]
+
+
+nbv = len(xnames)
+varsparse = np.empty((dim,7,nbv))
+covarsparse = np.empty((dim,7,7))
+corsparse = np.empty((dim,7,7))
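+# slots 0-5 hold the six face neighbours, slot 6 the cell itself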
+for i in range(dim):
+    if int(boundary[i]) !=1:
+        for j in range(nbv):
+            for k in range(6):
+                varsparse[i,k,j] = difData[j][int(neighbours[i][k])]
+            varsparse[i,6,j] = difData[j][i]
+        covarsparse[i,:,:] = np.cov(varsparse[i,:,:])
+        corsparse[i,:,:] = np.corrcoef(varsparse[i,:,:])
+print(covarsparse[88907,:,:])
+print(corsparse[88907,:,:])
+
+
+positions =[]
+variances =[]
+radcovar=[]
+polcovar=[]
+torcovar=[]
+angles = sorted(set([r[1] for r in coords]))  # sort: set order is not deterministic
+torpos = angles[4]
+print(torpos, 'torpos')
+for i in range(len(coords)):
+    if coords[i][1] == torpos and int(boundary[i]) !=1:
+        positions.append(coords[i])
+        variances.append(avVar[i])
+        radcovar.append((corsparse[i,6,1]+corsparse[i,6,0])/2)
+        polcovar.append((corsparse[i,6,3]+corsparse[i,6,2])/2)
+        torcovar.append((corsparse[i,6,5]+corsparse[i,6,4])/2)
+
+print(len(positions))
+r = [x[0] for x in positions]
+z = [x[2] for x in positions]
+
+plot4 = plt.figure(13)
+plt.xlabel('Radial Position')
+plt.ylabel('Vertical Position')
+sc1 = plt.scatter(r,z,c=variances, cmap='plasma', norm=matplotlib.colors.LogNorm())    
+plt.colorbar(sc1)
+plt.title('Variances' )
+
+plot8 = plt.figure(15)  # figure 8 already holds the correlation plot above
+plt.xlabel('Radial Position [cm]')
+plt.ylabel('Vertical Position [cm]')
+title = 'Variance'
+plt.title(title)
+sc1 = plt.scatter(coords[:,0], coords[:,2], s=20, c=avVar, cmap='plasma')
+plt.colorbar(sc1)
+plt.title('Variances')
+
+plot6 = plt.figure(14)
+plt.xlabel('Cell Volume')
+plt.ylabel('Variance')
+sc1 = plt.scatter(volumes,avVar)
+plt.title('Variance vs Volume')
+
+
+
+
+
+plot9 = plt.figure(9)
+sc1 = plt.scatter(r,z,c=radcovar, cmap='plasma')  
+plt.colorbar(sc1)
+plt.xlabel('Horizontal Position [cm]')
+plt.ylabel('Vertical Position [cm]')
+plt.title('Radial Covariance')
+
+plot9 = plt.figure(10)
+sc1 = plt.scatter(r,z,c=polcovar, cmap='plasma') 
+plt.colorbar(sc1)
+plt.title('Poloidal Covariance')
+plt.xlabel('Horizontal Position [cm]')
+plt.ylabel('Vertical Position [cm]')
+
+plot9 = plt.figure(11)
+sc1 = plt.scatter(r,z,c=torcovar, cmap='plasma') 
+plt.colorbar(sc1)
+plt.title('Toroidal Covariance')
+plt.xlabel('Horizontal Position [cm]')
+plt.ylabel('Vertical Position [cm]')
+
+
+
+
+
+#plot = plt.figure(8)
+#plt.hist(covarsparse[:,6,0],bins=50)
+#plot = plt.figure(9)
+#plt.hist(covarsparse[:,6,1],bins=50)
+#plot = plt.figure(10)
+#plt.hist(covarsparse[:,6,2],bins=50)
+#plot = plt.figure(11)
+#plt.hist(covarsparse[:,6,3],bins=50)
+#plot = plt.figure(12)
+#plt.hist(covarsparse[:,6,4],bins=50)
+#plot = plt.figure(13)
+#plt.hist(covarsparse[:,6,5],bins=50)
 plt.show()
diff --git a/Scripts_Matthieu/PlotArrows.py b/Scripts_Matthieu/PlotArrows.py
new file mode 100644
index 0000000000000000000000000000000000000000..21c9322b133a2363e98d9294d5b87d992c701505
--- /dev/null
+++ b/Scripts_Matthieu/PlotArrows.py
@@ -0,0 +1,153 @@
+import math
+import driftFunctions
+import AnalyticalPrescriptions
+import Constants
+import Utilities
+import pandas as pd
+import matplotlib.pyplot as plt
+import matplotlib
+import PlottingFunctions
+import AnalysisFunctions
+import sys
+import os
+import seaborn as sns
+import numpy as np
+sys.path.append('../../emc3-grid-generator')
+from gg import GG
+method = sys.argv[1]
+anCase = sys.argv[2]
+noise = sys.argv[3]
+#weight = sys.argv[4]
+path = '' # Case+'/'
+allInfo = []
+allInfoGB = []
+
+[info,infoGB] = AnalysisFunctions.readAllInfo(method,anCase,noise,'') #Case+'/'
+keys = list(info.keys())
+keysGB = list(infoGB.keys())
+
+for i in range(len(keys)-1): # don't include pos
+    allInfo.append(info.get(keys[i]))
+for i in range(len(keysGB)-1): # don't include pos
+    allInfoGB.append(infoGB.get(keysGB[i]))
+pos = info.get('pos')
+gbpos = infoGB.get('gb_pos')
+
+angles = [r[1] for r in pos]
+uniqueangles = list(set(angles))    
+print(uniqueangles)
+uniqueangles.sort()
+# Get a slice for 2D visualization
+
+ind=3
+torsurf = [uniqueangles[ind]]
+print(torsurf)
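+# collect every field on a single toroidal surface for 2D (R,Z) plotting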
+torSliceDict = {}
+torslices = AnalysisFunctions.getAllSlices(pos,allInfo,1,torsurf)
+for i in range(len(torslices)):
+    torSliceDict[keys[i]] = torslices[i]
+    
+
+angles = [r[1] for r in gbpos]
+uniqueanglesGB = sorted(set(angles))  # sort: set order is not deterministic
+torsurfGB = [uniqueanglesGB[33]]
+torSliceDictGB = {}
+torslicesGB = AnalysisFunctions.getAllSlices(gbpos,allInfoGB,1,torsurfGB)
+for i in range(len(torslicesGB)):
+    torSliceDictGB[keysGB[i]] = torslicesGB[i]   
+     
+# Make Plots
+x = [r[0] for r in torSliceDict.get('pos')]
+y = [r[1] for r in torSliceDict.get('pos')]
+
+xg = [r[0] for r in torSliceDictGB.get('gb_pos')]
+yg = [r[1] for r in torSliceDictGB.get('gb_pos')]
+
+# Transform Efield vectors to x,y
+Ex = [r[0] for r in torSliceDict.get('efield_perp')]
+Ey = [r[2] for r in torSliceDict.get('efield_perp')]
+
+GBx = [r[0] for r in torSliceDictGB.get('gradb_perp')]
+GBy = [r[2] for r in torSliceDictGB.get('gradb_perp')]
+
+nb = 90
+length = len(Ex)
+step = max(1, length//nb)  # subsample so roughly nb arrows are drawn
+fig, ax = plt.subplots()
+plt.xlabel('Radial Position [cm]')
+plt.ylabel('Vertical Position [cm]')
+title = 'Electric Field'
+plt.title(title)
+count=0
+countl=0
+#grid = GG()
+#dat = grid.read_mesh3d(fn='Inputs/GEOMETRY_3D_DATA')
+#grid.plot_mesh3d(dat,ind)
+for i in range(0,length):
+    if i%step == 0: #or (x[i]< 550 and y[i] < -90 and count <10) or (x[i]<540 and y[i]>90 and countl<10):
+        plt.quiver(x[i],y[i],Ex[i], Ey[i])
+        if x[i]<540 and y[i]<-90:
+            count=count+1
+            print(count, 'count')
+        elif x[i]<540 and y[i]>90:
+            countl=countl+1
+            print(countl, 'countl')
+
+
+# Extract the drift velocity components in the poloidal plane
+Edx = [r[0] for r in torSliceDict.get('edrifts_perp')]
+Edy = [r[2] for r in torSliceDict.get('edrifts_perp')]
+
+Gdx = [r[0] for r in torSliceDict.get('gdrifts_perp')]
+Gdy = [r[2] for r in torSliceDict.get('gdrifts_perp')]
+
+length = len(Edx)
+step = max(1, length//nb)
+fig, ax = plt.subplots()
+plt.xlabel('Radial Position [cm]')
+plt.ylabel('Vertical Position [cm]')
+title = 'ExB Drifts'
+plt.title(title)
+count=0
+countl=0
+
+for i in range(0,length):
+    if i%step == 0:# or (x[i]< 550 and y[i] < -90 and count <10) or (x[i]<540 and y[i]>90 and countl<10):
+        plt.quiver(x[i],y[i],Edx[i], Edy[i])
+        if x[i]<540 and y[i]<-90:
+            count=count+1
+            print(count, 'count')
+        elif x[i]<540 and y[i]>90:
+            countl=countl+1
+            print(countl, 'countl')
+    
+
+plt.savefig(method+anCase+noise+'VelocityFieldB.pdf')
+
+length = len(Gdx)
+step = max(1, length//nb)
+fig, ax = plt.subplots()
+plt.xlabel('Radial Position [cm]')
+plt.ylabel('Vertical Position [cm]')
+title = 'Diamagnetic Drifts'
+plt.title(title)
+count=0
+countl=0
+
+for i in range(0,length):
+    if i%step == 0:# or (x[i]< 550 and y[i] < -90 and count <10) or (x[i]<540 and y[i]>90 and countl<10):
+        plt.quiver(x[i],y[i],Gdx[i], Gdy[i])
+        if x[i]<540 and y[i]<-90:
+            count=count+1
+            print(count, 'count')
+        elif x[i]<540 and y[i]>90:
+            countl=countl+1
+            print(countl, 'countl')
+    
+
+
+plt.savefig(method+anCase+noise+'GBVelocityFieldB.pdf')
+plt.show()
diff --git a/Scripts_Matthieu/PlotTrajectory.py b/Scripts_Matthieu/PlotTrajectory.py
index ccd263998fc6f789946c70f17a38f2c46dd7ced9..5fa53ef2037f5679835fbfcd1731715003f56a68 100644
--- a/Scripts_Matthieu/PlotTrajectory.py
+++ b/Scripts_Matthieu/PlotTrajectory.py
@@ -2,6 +2,11 @@ import matplotlib.pyplot as plt
 import os
 import sys
 import AnalysisFunctions
+sys.path.append('../../emc3-grid-generator')
+from gg import GG
+import seaborn as sns
+import pandas as pd
+import numpy as np
 
 path = os.getcwd()
 method = sys.argv[1]
@@ -9,28 +14,50 @@ analyticalP = sys.argv[2]
 noise = sys.argv[3]
 name = sys.argv[4]
 aP = int(analyticalP)
-posNum = []
-posLen = []
+
+plt.rcParams.update({'font.size': 12})
 
 plot1 = plt.figure(1)
 plt.xlabel('Radial Position [cm]')
 plt.ylabel('Vertical Position [cm]')
 title = 'Trajectory'+ name
 plt.title(title)
-for j in range(1,6):
+for j in range(3,5):
+    posNum = []
+    posLen = []
+    posanNum = []
+    posanLen = []
     positions = AnalysisFunctions.readText(path+ '/Output' + method+'Descr'+analyticalP+'Noise'+noise+'/TRAJECTORY'+str(j))
-    for i in range(len(positions)):
-        if i%2 == 0:
-            p = [float(x) for x in positions[i].split()]
-            posNum.append(p)
-        else:
-            l = float(positions[i][0])
-            posLen.append(l)
-
+    dim = len(positions)
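+    # TRAJECTORY files store four lines per step: numeric position, numeric
+    # pathlength, analytical position, analytical pathlength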
+    for i in range(int(dim/4)):
+        p = [float(x) for x in positions[4*i].split()]
+        posNum.append(p)
+        l = float(positions[4*i+1].split()[0])
+        posLen.append(l)
+        pan = [float(x) for x in positions[4*i+2].split()]
+        posanNum.append(pan)
+        lan = float(positions[4*i+3].split()[0])
+        posanLen.append(lan)
+    posLen=np.array(posLen)*1/max(posLen)
     x = [r[0] for r in posNum]
     y = [r[2] for r in posNum]
+    xan = [r[0] for r in posanNum]
+    yan = [r[2] for r in posanNum]
 
 
-    sc = plt.scatter(x, y, s=1)
 
+    sc = plt.scatter(x, y, s=5, c=posLen, cmap='Reds', label='Numeric'+str(j))
+    scan = plt.scatter(xan, yan, c=posanLen, cmap='Greys', s=5, label='Analytical'+str(j))
+#    plt.colorbar(sc)
+#    plt.colorbar(scan)
+    plt.xlabel('Radial Position [cm]')
+    plt.ylabel('Vertical Position [cm]')
+    title = 'Drift Orbits'
+    plt.title(title)
+
+grid = GG()
+dat = grid.read_mesh3d(fn='Inputs/GEOMETRY_3D_DATA')
+grid.plot_mesh3d(dat,3)
+plt.savefig('Trajectories'+method+analyticalP+noise+'.pdf')
 plt.show()
+
diff --git a/Scripts_Matthieu/PlotWeirdCells.py b/Scripts_Matthieu/PlotWeirdCells.py
new file mode 100644
index 0000000000000000000000000000000000000000..53997531e15e5e579cfe38d3fe7ff2117f76b381
--- /dev/null
+++ b/Scripts_Matthieu/PlotWeirdCells.py
@@ -0,0 +1,77 @@
+import numpy as np
+import os
+import matplotlib.pyplot as plt
+import driftFunctions
+import AnalyticalPrescriptions
+import Constants
+import Utilities
+import pandas as pd
+import PlottingFunctions
+import AnalysisFunctions
+import sys
+from scipy import sparse
+import math
+from mpl_toolkits import mplot3d
+path = os.getcwd()
+sys.path.append('../../emc3-grid-generator')
+from gg import GG
+geoData = AnalysisFunctions.readText(path+'/Output2Descr1Noise0/DRIFT_POS')
+limits = AnalysisFunctions.readText(path+'/Output2Descr1Noise0/LIMITS')
+points = []
+for i in range(len(geoData)):
+    lim = [float(x) for x in limits[4*i+1].split()]
+    if abs(lim[0]-lim[1])>1:
+        pos = [float(x) for x in geoData[i].split()]
+        points.append(pos)
+        print(i)
+
+x = [x[0]*math.cos(x[1]) for x in points]
+z = [x[2] for x in points]
+y = [x[0]*math.sin(x[1]) for x in points] 
+
+fig = plt.figure()
+ax = plt.axes(projection='3d')
+ax.scatter3D(x,y,z)
+print(points)
+
+angles = [r[1] for r in points]
+uniqueangles = list(set(angles)) 
+uniqueangles.sort()
+print(uniqueangles)
+print(len(uniqueangles))
+count = []
+for i in range(len(uniqueangles)):
+    count.append(angles.count(uniqueangles[i]))
+max_value = max(count)
+max_index = count.index(max_value)
+print('max',max_value,max_index)
+torPos = uniqueangles[max_index]
+ind = int(torPos/max(uniqueangles)*36-1)
+rp = []
+zp = []
+for i in range(len(x)):
+    if points[i][1] == torPos:
+        rp.append(points[i][0])
+        zp.append(points[i][2])    
+
+print(torPos)
+
+plot2 = plt.figure(2)
+plt.xlabel('Radial Position [cm]')
+plt.ylabel('Vertical Position [cm]')
+sc2 = plt.scatter(rp,zp)
+grid = GG()
+dat = grid.read_mesh3d(fn='Inputs/GEOMETRY_3D_DATA')
+grid.plot_mesh3d(dat,ind)
+
+
+plt.show()
+
+
diff --git a/Scripts_Matthieu/PrepareGeometryCompareNodal.py b/Scripts_Matthieu/PrepareGeometryCompareNodal.py
index 3a2b94fd080abf15da39e7265ec0f45642c3f0de..fe78d724389b8992d7729030c5c6c51461f9343b 100644
--- a/Scripts_Matthieu/PrepareGeometryCompareNodal.py
+++ b/Scripts_Matthieu/PrepareGeometryCompareNodal.py
@@ -4,18 +4,18 @@ import shutil
 import AnalyticalPrescriptions
 # The folder where the different cases should be stored is
 basepath = '/u/mbj/Drifts/data-for-drift-computations'
-folder = '/u/mbj/Drifts/data-for-drift-computations/CompareNodal105'
+folder = '/u/mbj/Drifts/data-for-drift-computations/TestUnCorrelatedNoise'
 os.mkdir(folder)
 # Choose the parameters for the case to test
 # Geometry
 # Number of zones considered
 nz = 1
 # Loop over different cases to prepare
-nts = [6, 12,24, 36, 48]
-nrs = [10, 20, 40, 60,80]
-nps = [50, 100, 200, 300,400]
+nts = [4,8,16,24,36]
+nrs = [8,16,32,48,64]
+nps = [40,80,160,240,320]
 #cases = [nrs,nps]
-caseNames = ['Case1','Case2','Case3', 'Case4', 'Case5']
+caseNames = ['Case1','Case2','Case3','Case4','Case5']
 # Number of toroidal, radial, poloidal surfaces
 bfielddescription = AnalyticalPrescriptions.bfieldstrength1
 for i in range(len(nps)):
diff --git a/Scripts_Matthieu/ResolutionStudyGeometries.py b/Scripts_Matthieu/ResolutionStudyGeometries.py
index 6641169ee280a9bc5b2b12899a8d8891f673df55..5f28b219b4108479285a563b84782fa7529c1e89 100644
--- a/Scripts_Matthieu/ResolutionStudyGeometries.py
+++ b/Scripts_Matthieu/ResolutionStudyGeometries.py
@@ -5,14 +5,14 @@ import AnalyticalPrescriptions
 import InputsGeometry
 # The folder where the different cases should be stored is
 basepath = '/u/mbj/Drifts/data-for-drift-computations'
-folder = '/u/mbj/Drifts/data-for-drift-computations/ResolutionStudies'
+folder = '/u/mbj/Drifts/data-for-drift-computations/ResolutionFinal'
 os.mkdir(folder)
 # Choose the parameters for the case to test
 # Geometry
 # Number of zones considered
 nz = 1
 # Analytical Profiles
-potentialdescription = AnalyticalPrescriptions.potential_description1
+#potentialdescription = AnalyticalPrescriptions.potential_description1
 bfielddescription = AnalyticalPrescriptions.bfieldstrength1
 # General Info
 # toroidal range (degrees)
@@ -22,14 +22,14 @@ Rmajor = 500
 Rminor = 100
 
 # Loop over different cases to prepare
-nts = [10, 20, 40, 80, 160]
-nrs = [12, 25, 50, 100, 200]
-nps = [40, 80, 160, 320, 640]
+nts = [5, 10, 20, 30]
+nrs = [10, 20, 40, 60]
+nps = [40, 80, 160, 240]
 
 #cases = [nrs,nps]
-caseNamesT = ['Case10', 'Case20', 'Case40','Case80', 'Case160']
-caseNamesP = ['Case40', 'Case80', 'Case160', 'Case320', 'Case640']
-caseNamesR = ['Case12', 'Case25', 'Case50', 'Case100', 'Case200']
+caseNamesT = ['Case1', 'Case2', 'Case3','Case4']
+caseNamesP = ['Case1', 'Case2', 'Case3','Case4']
+caseNamesR = ['Case1', 'Case2', 'Case3','Case4']
 # Number of toroidal, radial, poloidal surfaces
 
 
@@ -46,8 +46,21 @@ for i in range(len(nts)):
     os.mkdir(path)
     nt = nts[i]
     
-    InputsGeometry.createInputFiles(nz, nr, np, nt,path, trange, Rmajor, Rminor,
-    bfielddescription, potentialdescription)
+    # Create the file with the general info (assuming these helper functions
+    # live in InputsGeometry; unqualified names would raise a NameError)
+    gi_path = path+"/input.geo"
+    InputsGeometry.CreateGeneralInfo(gi_path, nz, [nr], [np], [nt])
+
+    # Create the GEOMETRY_3D_DATA file
+    [tor,rad,hor] = InputsGeometry.create_toroidal_vertices(nt,nr,np,trange,Rmajor,Rminor)
+    filepath = path+'/GEOMETRY_3D_DATA'
+    InputsGeometry.writeGEOMETRY3DDATA(tor,rad,hor,nt,nr,np,filepath)
+
+    # Impose the analytical magnetic field description
+    bfieldpath = path+"/BFIELD_STRENGTH"
+    InputsGeometry.imposeAnalytic(tor,rad,hor,bfielddescription,Rmajor, Rminor, bfieldpath)
 
 
 nr = nrs[2]
@@ -61,8 +74,21 @@ for i in range(len(nps)):
     path = path + "/Inputs"
     os.mkdir(path)
     np = nps[i]
-    InputsGeometry.createInputFiles(nz, nr, np, nt,path, trange, Rmajor, Rminor,
-    bfielddescription, potentialdescription)
+    # Create the file with the general info (assuming these helper functions
+    # live in InputsGeometry; unqualified names would raise a NameError)
+    gi_path = path+"/input.geo"
+    InputsGeometry.CreateGeneralInfo(gi_path, nz, [nr], [np], [nt])
+
+    # Create the GEOMETRY_3D_DATA file
+    [tor,rad,hor] = InputsGeometry.create_toroidal_vertices(nt,nr,np,trange,Rmajor,Rminor)
+    filepath = path+'/GEOMETRY_3D_DATA'
+    InputsGeometry.writeGEOMETRY3DDATA(tor,rad,hor,nt,nr,np,filepath)
+
+    # Impose the analytical magnetic field description
+    bfieldpath = path+"/BFIELD_STRENGTH"
+    InputsGeometry.imposeAnalytic(tor,rad,hor,bfielddescription,Rmajor, Rminor, bfieldpath)
 
 
 nt = nts[2]
@@ -77,6 +103,19 @@ for i in range(len(nrs)):
     os.mkdir(path)
     nr = nrs[i]
     
-    InputsGeometry.createInputFiles(nz, nr, np, nt,path, trange, Rmajor, Rminor,
-    bfielddescription, potentialdescription)
+    # Create the file with the general info (assuming these helper functions
+    # live in InputsGeometry; unqualified names would raise a NameError)
+    gi_path = path+"/input.geo"
+    InputsGeometry.CreateGeneralInfo(gi_path, nz, [nr], [np], [nt])
+
+    # Create the GEOMETRY_3D_DATA file
+    [tor,rad,hor] = InputsGeometry.create_toroidal_vertices(nt,nr,np,trange,Rmajor,Rminor)
+    filepath = path+'/GEOMETRY_3D_DATA'
+    InputsGeometry.writeGEOMETRY3DDATA(tor,rad,hor,nt,nr,np,filepath)
+
+    # Impose the analytical magnetic field description
+    bfieldpath = path+"/BFIELD_STRENGTH"
+    InputsGeometry.imposeAnalytic(tor,rad,hor,bfielddescription,Rmajor, Rminor, bfieldpath)
    
diff --git a/Scripts_Matthieu/TimeScanTrajectories.py b/Scripts_Matthieu/TimeScanTrajectories.py
new file mode 100644
index 0000000000000000000000000000000000000000..146bc63ec2711cc5751eef4b70cd786fbbeed103
--- /dev/null
+++ b/Scripts_Matthieu/TimeScanTrajectories.py
@@ -0,0 +1,127 @@
+import matplotlib.pyplot as plt
+import os
+import sys
+import AnalysisFunctions
+import math
+import seaborn as sns
+import pandas as pd
+
+analyticalCase = sys.argv[1]
+path = os.getcwd()
+methods = sys.argv[3:]
+print(methods)
+noise = sys.argv[2]
+methodnames = ['GG CC', 'LS CC', 'GG N', 'LS N', 'MAP']
+methodsused = methods[0:len(methods)]
+pathlengths = []
+deviations = []
+pathdeviations = []
+reldev = []
+relpdev = []
+timesteps = []
+timestepss=[]
+errors = []
+errorsmag = []
+dataT = pd.DataFrame()
+for k in range(len(methods)):
+    method =methods[k]
+    print(method)
+    pls = []
+    devs = []
+    pdevs = []
+    rds = []
+    rpds = []
+    ts = []
+    tss=[]
+    errs = []
+    erms = []
+    aPL = 0
+    trajInfo = AnalysisFunctions.readText(path+ '/Output' + method+'Descr'+analyticalCase+'Noise'+noise+'/TRAJECTORY_INFO')
+    dim = len(trajInfo)
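+    # TRAJECTORY_INFO alternates a (pathlength, deviation, timestep) line with
+    # the average velocity-error vector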
+    for i in range(int(dim/2)):
+        t = [float(x) for x in trajInfo[2*i].split()]
+        er = [float(x) for x in trajInfo[2*i+1].split()]
+        pls.append(t[0])
+        devs.append(abs(t[1]))
+        ts.append(round(math.log(t[2],10)))
+        tss.append(t[2])
+        rds.append(devs[-1]/pls[-1])
+        errs.append(er)
+        erm = math.sqrt(er[0]**2+er[1]**2+er[2]**2)
+        erms.append(erm)
+        onedata = {'Method':methodnames[k],'Tracing Error':erm, 'Deviation':abs(t[1]), 'Pathlength':t[0],'Relative Deviation':rds[-1], 'Time Step':t[2]}
+        # DataFrame.append was removed in pandas 2.0; concat is the portable form
+        dataT = pd.concat([dataT, pd.DataFrame([onedata])], ignore_index=True)
+#        pdevs.append(abs(t[0]-aPL))
+#        rpds.append(pdevs[-1]/pls[-1])
+    pathlengths.append(pls)
+    deviations.append(devs)
+#    pathdeviations.append(pdevs)
+    reldev.append(rds)
+#    relpdev.append(rpds)
+    timesteps.append(ts)
+    timestepss.append(tss)
+    errors.append(errs)
+    errorsmag.append(erms)
+
+upperX = 1e-5
+lowerX = 1e-10
+upperY = 1e-4
+lowerY = 1e-8 
+xlabel = 'Timestep [s]'
+ylabel = 'Relative Deviation (after 1s)'
+
+print(reldev)
+
+plot1 = plt.figure(1)
+for i in range(len(methods)):
+    print(reldev[i])
+    print(timestepss[i])
+    sc1 = plt.scatter(timestepss[i],reldev[i],label = methodnames[i])
+
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+ax.legend()
+plt.axis(limits)
+plt.title('Relative Deviation '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'ReldevTS.pdf')
+
+upperY = 1e-2
+lowerY = 1e-5
+plot3 = plt.figure(3)
+for i in range(len(methods)):
+    sc1 = plt.scatter(timestepss[i],errorsmag[i],label = methodnames[i])
+
+
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+ax.legend()
+plt.axis(limits)
+plt.title('Average Velocity Error '+analyticalCase)
+plt.xlabel(xlabel)
+plt.ylabel('Average Velocity Error [-]')
+plt.savefig(path+'/Descr'+analyticalCase+'AvVelErrTS.pdf')
+plt.show()
+
+
+#sns.violinplot(methods,errorsmag,hue=noiselevels)
+#plt.show()
+plot4=plt.figure(4)
+plot = sns.violinplot(x=dataT['Time Step'],y='Tracing Error',hue='Method',data=dataT, scale="area", cut=0.1)
+plt.title('Average Velocity Error')
+plt.savefig(path+'/Descr'+analyticalCase+'AvVelErrViolinTS.pdf')
+
+
+plot5=plt.figure(5)
+plot = sns.violinplot(x='Time Step',y='Relative Deviation',hue='Method',data=dataT, scale="area", cut=0.1)
+plt.title('Relative Deviation')
+plt.savefig(path+'/Descr'+analyticalCase+'RelDevViolinTS.pdf')
+
+print(dataT)
+plt.show()
+	
diff --git a/Scripts_Matthieu/TrajectoryDeviation.py b/Scripts_Matthieu/TrajectoryDeviation.py
index 9aa7488556c4a0d059a07e319b3d95fc1ab0edb8..7a916c80763b5ab857cf61ef52450f077799410c 100644
--- a/Scripts_Matthieu/TrajectoryDeviation.py
+++ b/Scripts_Matthieu/TrajectoryDeviation.py
@@ -2,34 +2,67 @@ import matplotlib.pyplot as plt
 import os
 import sys
 import AnalysisFunctions
-
+import math
+import seaborn as sns
+import pandas as pd
+plt.rcParams.update({'font.size': 12})
 analyticalCase = sys.argv[1]
 path = os.getcwd()
-methods = ['6','7','8','9', '10']
-noise = '0'
-
+methods = sys.argv[3:]
+print(methods)
+noise = sys.argv[2]
+methodnames = ['GG CC', 'LS CC', 'GG N', 'LS N', 'MAP']
+methodsused = methods[0:len(methods)]
 pathlengths = []
 deviations = []
+pathdeviations = []
 reldev = []
+relpdev = []
 timesteps = []
-
-for method in methods:
+errors = []
+errorsmag = []
+dataT = pd.DataFrame()
+for k in range(len(methods)):
+    method =methods[k]
+    print(method)
     pls = []
     devs = []
+    pdevs = []
     rds = []
+    rpds = []
     ts = []
+    errs = []
+    erms = []
+    dpls=[]
+    aPL = 0
     trajInfo = AnalysisFunctions.readText(path+ '/Output' + method+'Descr'+analyticalCase+'Noise'+noise+'/TRAJECTORY_INFO')
-    for i in range(len(trajInfo)):
-        t = [float(x) for x in trajInfo[i].split()]
-        print(t)
+    dim = len(trajInfo)
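+    # four lines per record: (pathlength, deviation, timestep), velocity-error
+    # vector, signed velocity errors, analytical velocity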
+    for i in range(int(dim/4)):
+        t = [float(x) for x in trajInfo[4*i].split()]
+        er = [float(x) for x in trajInfo[4*i+1].split()]
+        ersign=[float(x) for x in trajInfo[4*i+2].split()]
+        anvel=[float(x) for x in trajInfo[4*i+3].split()]
+        anpl = (math.sqrt(anvel[0]**2+(anvel[1]+1000)**2+anvel[2]**2))*100*0.1
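+        # analytical pathlength estimate; the 1000 offset and the 100*0.1
+        # scaling are assumed unit conversions matching the tracing setup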
         pls.append(t[0])
+        dpls.append(abs(anpl-pls[-1])/anpl)
         devs.append(abs(t[1]))
         ts.append(t[2])
-        rds.append(devs[i]/pls[i])
+        rds.append(devs[-1]/pls[-1])
+        errs.append(er)
+        erm = math.sqrt(er[0]**2+er[1]**2+er[2]**2)
+        erms.append(erm)
+        onedata = {'Method':methodnames[k],'Tracing Error':erm, 'Deviation':abs(t[1]), 'Pathlength':t[0],'Relative Deviation':rds[-1],'Poloidal Velocity Error':er[2], 'Radial Velocity Error':er[0], 'PolVel':abs(ersign[2]),'RadVel':abs(ersign[0]), 'AnVel':anvel,'PDL':dpls[-1]}
+        # DataFrame.append was removed in pandas 2.0; concat is the portable form
+        dataT = pd.concat([dataT, pd.DataFrame([onedata])], ignore_index=True)
+#        pdevs.append(abs(t[0]-aPL))
+#        rpds.append(pdevs[-1]/pls[-1])
     pathlengths.append(pls)
     deviations.append(devs)
+#    pathdeviations.append(pdevs)
     reldev.append(rds)
+#    relpdev.append(rpds)
     timesteps.append(ts)
+    errors.append(errs)
+    errorsmag.append(erms)
 
 upperX = 1e-4
 lowerX = 1e-9
@@ -40,25 +73,104 @@ ylabel = 'Relative Deviation (after 1s)'
 
 
 plot1 = plt.figure(1)
-sc1 = plt.scatter(timesteps[0],reldev[0],label = methods[0])
-sc2 = plt.scatter(timesteps[1],reldev[1],label = methods[1])
-sc3 = plt.scatter(timesteps[0],reldev[2],label = methods[2])
-sc4 = plt.scatter(timesteps[1],reldev[3],label = methods[3])
-sc5 = plt.scatter(timesteps[0],reldev[4],label = methods[4])
+for i in range(len(methods)):
+    sc1 = plt.scatter(timesteps[i],reldev[i],label = methodnames[i])
+
+ax = plt.gca()
+ax.set_yscale('log')
+ax.set_xscale('log')
+limits = [lowerX, upperX, lowerY, upperY]
+ax.legend()
+plt.axis(limits)
+plt.title('Relative Deviation '+analyticalCase+' [-] ')
+plt.xlabel(xlabel)
+plt.ylabel(ylabel)
+plt.savefig(path+'/Descr'+analyticalCase+'Reldev.pdf')
+
+#plot2 = plt.figure(2)
+#sc1 = plt.scatter(timesteps[0],relpdev[0],label = methods[0])
+#sc2 = plt.scatter(timesteps[1],relpdev[1],label = methods[1])
+#sc3 = plt.scatter(timesteps[0],relpdev[2],label = methods[2])
+#sc4 = plt.scatter(timesteps[1],relpdev[3],label = methods[3])
+#sc5 = plt.scatter(timesteps[0],reldev[4],label = methods[4])
+
+#ax = plt.gca()
+#ax.set_yscale('log')
+#ax.set_xscale('log')
+#limits = [lowerX, upperX, lowerY, upperY*100]
+#ax.legend()
+##x = np.linspace(0.000001,1,100)
+##y = x**(1)/100
+##plt.loglog(x,y)
+#plt.axis(limits)
+#plt.title('Relative Pathlength Deviation'+analyticalCase)
+#plt.xlabel(xlabel)
+#plt.ylabel(ylabel)
+#plt.savefig(path+'/Descr'+analyticalCase+'PathReldev.png')
+
+plot3 = plt.figure(3)
+for i in range(len(methods)):
+    sc1 = plt.scatter(timesteps[i],errorsmag[i],label = methodnames[i])
+
 
 ax = plt.gca()
 ax.set_yscale('log')
 ax.set_xscale('log')
 limits = [lowerX, upperX, lowerY, upperY]
 ax.legend()
-#x = np.linspace(0.000001,1,100)
-#y = x**(1)/100
-#plt.loglog(x,y)
 plt.axis(limits)
-plt.title('Relative Deviation'+analyticalCase)
+plt.title('Average Velocity Error '+analyticalCase +' [-] ')
 plt.xlabel(xlabel)
 plt.ylabel(ylabel)
-plt.savefig(path+'/Descr'+analyticalCase+'Reldev.png')
+plt.savefig(path+'/Descr'+analyticalCase+'AvVelErr.pdf')
+
+
+
+#sns.violinplot(methods,errorsmag,hue=noiselevels)
+#plt.show()
+plot4=plt.figure(4)
+plot = sns.violinplot(x=dataT['Method'],y='Tracing Error',data=dataT, scale="area", cut=0.1)
+plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
+plt.title('Average Velocity Error [-]')
+plt.savefig(path+'/Descr'+analyticalCase+'AvVelErrViolin.pdf')  # avoid overwriting the scatter plot saved above
+
+
+plot5=plt.figure(5)
+plot = sns.violinplot(x=dataT['Method'],y='Relative Deviation',data=dataT, scale="area", cut=0.1)
+plt.title('Relative Deviation [-]')
+plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
+plt.savefig(path+'/Descr'+analyticalCase+'RelDevViolin.pdf')
+
+plot6=plt.figure(6)
+plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
+plot = sns.violinplot(x=dataT['Method'],y='Radial Velocity Error',data=dataT, scale="area", cut=0.1)
+plt.title('Radial Velocity Error [-]')
+plt.savefig(path+'/Descr'+analyticalCase+'RadVelViolin.pdf')
+
+plot6=plt.figure(7)
+plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
+plot = sns.violinplot(x=dataT['Method'],y='Poloidal Velocity Error',data=dataT, scale="area", cut=0.1)
+plt.title('Poloidal Velocity Error [-]')
+plt.savefig(path+'/Descr'+analyticalCase+'PolVelViolin.pdf')
+
+plot6=plt.figure(8)
+plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
+plot = sns.violinplot(x=dataT['Method'],y='PolVel',data=dataT, scale="area", cut=0.1)
+plt.title('Poloidal Velocity Error (Bias) [-]')
+plt.savefig(path+'/Descr'+analyticalCase+'PolVelBiasViolin.pdf')
+
+plot6=plt.figure(9)
+plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
+plot = sns.violinplot(x=dataT['Method'],y='RadVel',data=dataT, scale="area", cut=0.1)
+plt.title('Radial Velocity Error (Bias) [-]')
+plt.savefig(path+'/Descr'+analyticalCase+'RadVelBiasViolin.pdf')
+
+plot6=plt.figure(10)
+plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
+plot = sns.violinplot(x=dataT['Method'],y='PDL',data=dataT, scale="area", cut=0.1)
+plt.title('Relative Pathlength Difference [-]')
+plt.savefig(path+'/Descr'+analyticalCase+'Relative Pathlength Difference.pdf')
 
+print(dataT)
 plt.show()
 	
diff --git a/Scripts_Matthieu/TrajectoryDeviationNoisy.py b/Scripts_Matthieu/TrajectoryDeviationNoisy.py
new file mode 100644
index 0000000000000000000000000000000000000000..e57df3de498d4930ee23f99ebe615364821f8600
--- /dev/null
+++ b/Scripts_Matthieu/TrajectoryDeviationNoisy.py
@@ -0,0 +1,91 @@
+import matplotlib.pyplot as plt
+import os
+import sys
+import AnalysisFunctions
+import math
+import seaborn as sns
+import pandas as pd
+plt.rcParams.update({'font.size': 12})
+analyticalCase = sys.argv[1]
+path = os.getcwd()
+mn = sys.argv[2:]
+lenmn=len(mn)
+lenm=int(lenmn/2)
+methods=mn[0:lenm]
+print(methods)
+noises=mn[lenm:]
+noiseLevels=[0.3,0.6,0.9,1.2,1.5]
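+# assumed one-to-one mapping between the noise ids passed on the command line
+# and these noise standard deviations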
+methodnames = ['GG CC', 'LS CC', 'GG N', 'LS N', 'MAP']
+methodsused = methodnames[0:len(methods)]
+dataT = pd.DataFrame()
+
+for k in range(len(methods)):
+    method =methods[k]
+    print(method)
+    for j in range(len(noises)):
+        noise = noiseLevels[j]
+        trajInfo = AnalysisFunctions.readText(path+ '/Output' + method+'Descr'+analyticalCase+'Noise'+noises[j]+'/TRAJECTORY_INFO')
+        dim = len(trajInfo)
+        for i in range(int(dim/4)):
+            t = [float(x) for x in trajInfo[4*i].split()]
+            er = [float(x) for x in trajInfo[4*i+1].split()]
+            ersign=[float(x) for x in trajInfo[4*i+2].split()]
+            anvel=[float(x) for x in trajInfo[4*i+3].split()]
+            anpl = (math.sqrt(anvel[0]**2+(anvel[1]+1000)**2+anvel[2]**2))*100*0.1
+            erm = math.sqrt(er[0]**2+er[1]**2+er[2]**2)
+            onedata = {'Method':methodnames[k],'Tracing Error':erm, 'Deviation':abs(t[1]), 'Pathlength':t[0],'Relative Deviation':abs(t[1])/t[0],'Poloidal Velocity Error':er[2], 'Radial Velocity Error':er[0], 'PolVel':abs(ersign[2]),'RadVel':abs(ersign[0]), 'AnVel':anvel,'PDL':abs(anpl-t[0])/anpl, 'Noise Standard Deviation':noise}
+            # DataFrame.append was removed in pandas 2.0; concat is the portable form
+            dataT = pd.concat([dataT, pd.DataFrame([onedata])], ignore_index=True)
+
+
+plot1=plt.figure(1)
+plot = sns.violinplot(x=dataT['Method'],y='Tracing Error',hue='Noise Standard Deviation',data=dataT, scale="area", cut=0.1)
+plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
+plt.title('Average Velocity Error')
+plt.legend(loc=2, prop={'size': 10})
+plt.savefig(path+'/Descr'+analyticalCase+'AvVelErr.pdf')
+
+
+plot2=plt.figure(2)
+plot = sns.violinplot(x=dataT['Method'],y='Relative Deviation',hue='Noise Standard Deviation',data=dataT, scale="area", cut=0.1)
+plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
+plt.title('Relative Deviation')
+plt.legend(loc=2, prop={'size': 10})
+plt.savefig(path+'/Descr'+analyticalCase+'RelDevViolin.pdf')
+
+plot6=plt.figure(6)
+plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
+plot = sns.violinplot(x=dataT['Method'],y='Radial Velocity Error',hue='Noise Standard Deviation',data=dataT, scale="area", cut=0.1)
+plt.title('Radial Velocity Error [-]')
+plt.legend(loc=2, prop={'size': 10})
+plt.savefig(path+'/Descr'+analyticalCase+'RadVelViolin.pdf')
+
+plot6=plt.figure(7)
+plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
+plot = sns.violinplot(x=dataT['Method'],y='Poloidal Velocity Error',hue='Noise Standard Deviation',data=dataT, scale="area", cut=0.1)
+plt.title('Poloidal Velocity Error [-]')
+plt.savefig(path+'/Descr'+analyticalCase+'PolVelViolin.pdf')
+
+plot6=plt.figure(8)
+plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
+plot = sns.violinplot(x=dataT['Method'],y='PolVel',hue='Noise Standard Deviation',data=dataT, scale="area", cut=0.1)
+plt.title('Poloidal Velocity Error (Bias) [-]')
+plt.legend(loc=2, prop={'size': 10})
+plt.savefig(path+'/Descr'+analyticalCase+'PolVelBiasViolin.pdf')
+
+plot6=plt.figure(9)
+plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
+plot = sns.violinplot(x=dataT['Method'],y='RadVel',hue='Noise Standard Deviation',data=dataT, scale="area", cut=0.1)
+plt.title('Radial Velocity Error (Bias) [-]')
+plt.legend(loc=2, prop={'size': 10})
+plt.savefig(path+'/Descr'+analyticalCase+'RadVelBiasViolin.pdf')
+
+plot6=plt.figure(10)
+plt.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
+plot = sns.violinplot(x=dataT['Method'],y='PDL',hue='Noise Standard Deviation',data=dataT, scale="area", cut=0.1)
+plt.title('Relative Pathlength Difference [-]')
+plt.legend(loc=2, prop={'size': 10})
+plt.savefig(path+'/Descr'+analyticalCase+'Relative Pathlength Difference.pdf')
+
+
+plt.show()
+	
diff --git a/Scripts_Matthieu/getCorrelation.py b/Scripts_Matthieu/getCorrelation.py
new file mode 100644
index 0000000000000000000000000000000000000000..19171cb3ae8ef0f8725ee1e9a500808d990ccbd7
--- /dev/null
+++ b/Scripts_Matthieu/getCorrelation.py
@@ -0,0 +1,65 @@
+import math
+import driftFunctions
+import AnalyticalPrescriptions
+import Constants
+import Utilities
+import pandas as pd
+import matplotlib.pyplot as plt
+import PlottingFunctions
+import AnalysisFunctions
+import os
+import sys
+import numpy as np
+from scipy import sparse
+
+
+geoData = AnalysisFunctions.readText('/u/mbj/Drifts/data-for-drift-computations/Reference-FullRes/Output5Descr6Noise0/NBS')
+dim = math.floor(len(geoData)/5)
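+# NBS stores five lines per cell: neighbour indices, boundary flag, cell-centre
+# coordinates, and two lines of neighbour distances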
+print(dim)
+boundary = np.empty(dim)
+neighbours = np.empty((dim,6))
+coords = np.empty((dim,3))
+
+for i in range(dim):
+    boundary[i] = geoData[5*i+1]
+    neighbours[i][:] = [float(x) for x in geoData[5*i].split()]
+    coords[i][:] =[float(x) for x in geoData[5*i+2].split()]
+
+xnames = ['_1','_2','_3','_4','_5','_6','_7','_8','_9']#,'_10']
+Tdata = []
+
+for i in range(len(xnames)):
+    file = open('/u/mbj/Drifts/data-for-drift-computations/Reference-FullRes/X-TEST-HR/x-out'+xnames[i]+'/x-out/TE_TI','r')
+    Temptemp = file.readlines()
+    Temp = []
+    for line in Temptemp:
+        Temp.extend([float(x) for x in line.split()])
+    print(len(Temp))
+    Temp = np.array(Temp)
+    Tdata.append(Temp)
+    
+avTemp = Tdata[0]    
+for i in range(1,len(xnames)):
+    avTemp = np.add(avTemp,Tdata[i])
+avTemp = avTemp*1/len(xnames)
+
+difData = []  
+normdifData = []  
+for i in range(len(xnames)):
+    difTemp = np.absolute(np.subtract(avTemp,Tdata[i]))
+    normdifTemp = np.divide(difTemp,avTemp)
+    difData.append(difTemp)
+    normdifData.append(normdifTemp)
+    
+avDif = difData[0]    
+for i in range(1,len(xnames)):
+    avDif = np.add(avDif,difData[i])
+avDif = avDif*1/len(xnames)
+
+print(len(avDif))
diff --git a/Scripts_Matthieu/writeInfo.py b/Scripts_Matthieu/writeInfo.py
index 36f50c885a0925b60c97e1bc9a5f2eeccf5544f9..ed42ba56ef9f84adde0cdddcfabeea9264212fc4 100644
--- a/Scripts_Matthieu/writeInfo.py
+++ b/Scripts_Matthieu/writeInfo.py
@@ -14,6 +14,7 @@ path = os.getcwd()
 method = sys.argv[1]
 analyticalP = sys.argv[2]
 noise = sys.argv[3]
+#weight = sys.argv[4]
 aP = int(analyticalP)
 
 positions = AnalysisFunctions.readText(path+ '/Output' + method+'Descr'+analyticalP+'Noise'+noise+'/DRIFT_POS')
@@ -25,23 +26,31 @@ boundarycells =  AnalysisFunctions.readText(path+ '/Output' + method+'Descr'+ana
 boundarynodes = AnalysisFunctions.readText(path+ '/Output' + method+'Descr'+analyticalP+'Noise'+noise+'/BOUNDARYNODES')
 gbpositions =  AnalysisFunctions.readText(path+ '/Output' + method+'Descr'+analyticalP+'Noise'+noise+'/GB_POS')
 ggcorr = AnalysisFunctions.readText(path+ '/Output' + method+'Descr'+analyticalP+'Noise'+noise+'/GGCORR')
+potential =  AnalysisFunctions.readText(path+ '/Output' + method+'Descr'+analyticalP+'Noise'+noise+'/POTENTIAL_C')
 
 Rmajor = 500
 if aP ==1:
     edescr = AnalyticalPrescriptions.e_descr1
+    pdescr = AnalyticalPrescriptions.potential_description1
 elif aP == 5:
     edescr = AnalyticalPrescriptions.e_descr5
+    pdescr = AnalyticalPrescriptions.potential_description5
 elif aP==6:
     edescr = AnalyticalPrescriptions.e_descr6
+    pdescr = AnalyticalPrescriptions.potential_description1
     Rmajor = 590
+elif aP==7:
+    edescr= AnalyticalPrescriptions.e_descr7
+    pdescr = AnalyticalPrescriptions.potential_description2
 elif aP >= 2:
     edescr = AnalyticalPrescriptions.e_descr2
+    pdescr = AnalyticalPrescriptions.potential_description2
     
 bdescr = AnalyticalPrescriptions.b_field_description1
 gdescr = AnalyticalPrescriptions.gradb_description1
 exb = driftFunctions.ExBdrift
 gB = driftFunctions.gradBdrift
 
-allInfo = AnalysisFunctions.getAllInfo(positions,gbpositions,edrifts,efield,gradb,gdrifts,boundarycells,boundarynodes,edescr,bdescr,gdescr,exb, gB, ggcorr,Rmajor, method+'Descr'+analyticalP+'Noise'+noise)
+allInfo = AnalysisFunctions.getAllInfo(positions,gbpositions,edrifts,efield,gradb,gdrifts,boundarycells,boundarynodes,edescr,bdescr,gdescr,exb, gB, ggcorr,potential,pdescr,Rmajor, method+'Descr'+analyticalP+'Noise'+noise)
 
 print('CSV file written')