From 843414f9c4180cd66beeb04e334e67069969202e Mon Sep 17 00:00:00 2001
From: Markus Scheidgen <markus.scheidgen@gmail.com>
Date: Sun, 1 Mar 2020 20:43:01 +0100
Subject: [PATCH] Refactored datamodel to use metainfo2. #221

---
 docs/datamodel_metadataflow.png               | Bin 70932 -> 0 bytes
 docs/datamodel_transformations.png            | Bin 16268 -> 0 bytes
 docs/introduction.md                          |   3 -
 examples/domain.py                            |   6 +-
 .../metaInfoBrowser/MetaInfoBrowser.js        |   4 +-
 nomad/app/__init__.py                         |  12 +-
 nomad/app/api/__init__.py                     |   4 +-
 nomad/app/api/api.py                          |   2 +-
 nomad/app/api/archive.py                      |  28 +-
 nomad/app/api/auth.py                         |  28 +-
 nomad/app/api/common.py                       |  61 +-
 nomad/app/api/dataset.py                      |  10 +-
 nomad/app/api/info.py                         |  17 +-
 nomad/app/api/mirror.py                       |   8 +-
 nomad/app/api/raw.py                          |  36 +-
 nomad/app/api/repo.py                         |  82 +-
 nomad/app/api/upload.py                       |  47 +-
 nomad/app/common.py                           |  12 +-
 nomad/app/optimade/__init__.py                |   4 +-
 nomad/app/optimade/api.py                     |   4 +-
 nomad/app/optimade/endpoints.py               |  12 +-
 nomad/app/optimade/filterparser.py            |  10 +-
 nomad/app/optimade/models.py                  |  10 +-
 nomad/archive.py                              |  12 +-
 nomad/archive_query.py                        |  16 +-
 nomad/cli/__init__.py                         |   4 +-
 nomad/cli/admin/admin.py                      |  16 +-
 nomad/cli/admin/migration.py                  |  12 +-
 nomad/cli/admin/uploads.py                    |   9 +-
 nomad/cli/client/client.py                    |   2 +-
 nomad/cli/client/integrationtests.py          |   4 +-
 nomad/cli/client/local.py                     |  22 +-
 nomad/cli/client/mirror.py                    |   6 +-
 nomad/cli/client/statistics.py                |   4 +-
 nomad/cli/client/update_database.py           |   4 +-
 nomad/cli/client/upload.py                    |   4 +-
 nomad/cli/parse.py                            |  14 +-
 nomad/config.py                               |  16 +-
 nomad/datamodel/__init__.py                   |  65 +-
 nomad/datamodel/base.py                       | 823 +++++++-----------
 nomad/datamodel/dft.py                        | 273 +++---
 nomad/datamodel/ems.py                        | 101 +--
 nomad/datamodel/metainfo.py                   | 373 +++++++-
 nomad/doi.py                                  |   8 +-
 nomad/files.py                                | 129 ++-
 nomad/infrastructure.py                       |  36 +-
 nomad/metainfo/CONCEPT.md                     |   8 +-
 nomad/metainfo/__init__.py                    |  16 +-
 nomad/metainfo/elastic.py                     |   6 +-
 nomad/metainfo/example.py                     |  16 +-
 nomad/metainfo/flask_restplus.py              |   2 +-
 nomad/metainfo/legacy.py                      |  14 +-
 nomad/metainfo/metainfo.py                    | 380 +++++---
 nomad/metainfo/mongoengine.py                 |   4 +-
 nomad/metainfo/optimade.py                    |  36 +-
 nomad/metainfo/search.py                      | 116 +++
 nomad/normalizing/__init__.py                 |   4 +-
 nomad/normalizing/data/springer_msgpack.py    |  16 +-
 nomad/normalizing/normalizer.py               |  16 +-
 nomad/normalizing/optimade.py                 |   8 +-
 nomad/normalizing/structure.py                |   8 +-
 nomad/normalizing/system.py                   |  36 +-
 nomad/parsing/__init__.py                     |  12 +-
 nomad/parsing/artificial.py                   |  24 +-
 nomad/parsing/backend.py                      | 100 +--
 nomad/parsing/metainfo.py                     |  36 +-
 nomad/parsing/parser.py                       |  32 +-
 nomad/processing/__init__.py                  |   4 +-
 nomad/processing/base.py                      |  60 +-
 nomad/processing/data.py                      | 290 +++---
 nomad/search.py                               | 675 ++++++++------
 nomad/utils.py                                |  56 +-
 tests/__init__.py                             |   4 +-
 tests/app/resource.py                         |   4 +-
 tests/app/test_api.py                         | 119 +--
 tests/app/test_optimade.py                    |   4 +-
 tests/bravado_flask.py                        |  36 +-
 tests/conftest.py                             |  77 +-
 tests/data/parsers/octopus/stdout.txt         |   2 +-
 tests/processing/test_data.py                 |  48 +-
 tests/test_client.py                          |   6 +-
 tests/test_datamodel.py                       |  80 +-
 tests/test_files.py                           | 110 ++-
 tests/test_metainfo.py                        |  60 +-
 tests/test_normalizing.py                     |  82 +-
 tests/test_parsing.py                         |   4 +-
 tests/test_search.py                          |  73 +-
 tests/utils.py                                |  10 +-
 88 files changed, 2789 insertions(+), 2258 deletions(-)
 delete mode 100644 docs/datamodel_metadataflow.png
 delete mode 100644 docs/datamodel_transformations.png
 create mode 100644 nomad/metainfo/search.py

diff --git a/docs/datamodel_metadataflow.png b/docs/datamodel_metadataflow.png
deleted file mode 100644
index 5dc4c1f634d93ebd6911022d5bd3e9763781ecee..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 70932
[base85-encoded contents of the deleted docs/datamodel_metadataflow.png (70932 bytes) omitted; patch truncated before the diff for docs/datamodel_transformations.png]
z9n!UDY+^YPb`cgPkxV%s%HP|5;PqLve|h!%f7Y^`p2Y@+ER#Q+eVy^ZUd$=8FL_gy
zx`o5^n(zN^tT)Wjxg0D!`OJ(dA?9Hm#@nrhgxJz&X|zx5JF+DF?#h+sXQDU1Wsdp(
zl*QfYrpW8LhJC`uYQ48#%xF0$-)`A>tn#Z|En7^ihGJperpqViO`RqFMYiCXYc0F1
zcj?Z&BNKL=E66e{*<V@E%d-5lX!@$pxBjwE+P`DL`CR3g8S#%xc5i5s3`^cT>-LZP
ziC@pLoJl^`axuDqS6@EeNodaYlplTX`tJU|cTL9D_-et~hhbY~m)W&{PPFSYJ+9ua
zVPxCeJAKU?UYREqyVw`@FWh@!^M$7u`UUpxs9)&6kZnTl9G9b?Ek1Vk?hw9E^&;)W
z^<9bW$xPp!=k%CJzhb(1O_cH2N2@cB^Ixc)*fw3@LFv4vh<)V>xA(a{Fgov7c6}$)
z%NgfII^;i?3cNnGW6SIYhMu2>@4{EusAyNPEL(W$56>O0kDE{Cy%ZIwX88Ftj!(9|
zuKAYpmS+Oz7oKvG`E=C4{`a3ZYun5m{@DFve^lN&|7ZS!<BH3+stG1VGgV36V)=OT
zjNl9P7d0Knw#;2SVWS#X(SEU%>A$niZ<OEgbYt9w^8%;$$UZ8+yG@J3r}dlTe+Pf(
zKkGj4k%NvPI<PF7<dF1J$$x>V-LbbtHc!2L|DMe~!?ES!{nep|_B_+wz}#*8TcS$S
z=w0$wqbC=6c|T=}->ofA?D=-8;Evtb+24;ne7_@^uVjr+;cDGX`BP=xFAde7c3PVK
zm@fAJZgu{{{JcY@Csr6gyY%N#(Js5gO03qx!jsu<?$wnz^^NtCb&THk<(huiEjjEs
zm#-BF%bm@=Je~27Zs9rO74EW==NOgzs!pyewGdkGe%+;bxpwKc+fV=SW!{(<pD%8)
z+hzt|w^YSXm1#bAT7TN<ACTO2<*iRFTl+KbB{RBTpF3L0zErMwe%^E0le*fi)%m6~
zb}W7HE3!cHA9vE@_J_Rznkyt+{4e<JXMK2gUhT2)hKy##dp65%b8U1yW*{A+yI@%W
z*O47B1*B_vKF@Iz$dx{_qkZAd33J({WA{6pQQrMpp;MCAP~MDZ=hCpsZT3mt#SW(x
z<f^&5I3HK1BrRI7Q#(cF^$tnbr(ajh6|i5=b9;Veo!Z;;x^sD>*iYCk7dli@wd4H5
zxNntz_$~x)xUB0w<J^|L|1A%hSPAvqX7+6otNzxXbo#wqoyXqiZyjdbZ;qM}`_5Wm
znQX(GJuiEH)_psFpib$_c6Hu<{nj^|KJAGMFp)Z~1{!e$b+ft;n9A4Bsc_tXJ8QAw
z*|L~<l4_^czqwX@pt`b*G3Cs{C>cJnGp1~OG7&Ex#cjX7_sz8Sn`Ya44j-4ue_woh
zheIXXZ1-b--f5jpOnV{fon*894eKXg;gtolF(>j~Y;Zh$EL^<9ze}>-*yXYCqga8*
zH-ayp6n^$ESm3S5iM%PiPCKMho_|(4wB^!-w-&taGhWVMm#&UgsAHJl{K92RqhD*q
z94qNXxeYu1E0iZ3jA?)Quc1Oty1ArgmIJ>-&uxy2KU8M_&S3~^Njmi8M9W{ZT(`bI
zGG|(D+{k}_GyTqMfwlTaYFBZIoS69V<Nbxk3x7@cJNLo!IbnRaSPbXBJyFy4!Caua
zC_`59chuT763P3%9r?IBX?AR0z56qD6%FRA&2QMbzbn>g9;uN$Zgoko;Z63*i5n-p
zum|^U8=HI<RNw#iJTd7{y}|QicOM_S{bv7kR!L8n*YhSEz4~VM!8UH`kH!ny1Z2f;
z-MygnBE7?pV=dniH|bvg8ST*vx5&+$I<Fu%N55^xdB+!=9mhU6X{Y#v`p@Xqm{w@E
zh0%)d;=u={Z$DkUxKKHWa~1DJu_d`pyZi)~<i7O$&$DguLeUHT0=;~~d&76n@%gN;
zqJ4v9pX=A3H=`dlH{KVNvY)^0=6&(%`}Nh<N<U<i^!w(1v-Mw8d&BQ@%Uj=g7Q4nO
zhduX7PMBH!UU<vgqVe2MtBl^~DSY+W?>IN7U;1bFF>1HX$6KII5omO^QN3Ydw(x}j
zxnqf;nTHQ>*-YwS@;LP<Yun-v>X&Z#aa3z%)Oh(EKV)Wo#!a?5DC4t&*_Vvgg>GRQ
zYY(P8-^^3!Zu_w$;?0k%9RK;|@)_<wy)f#6a6s@1^%QHLGP&o9eaEslo!N3MQ##ut
zV!`VzZc@8v=XR;vn!jmHimQ25$9{z?ihnQHNtXscQ{MgYao*or-kjh2-9e1e>a5BA
z;y3KfJ^IJ~+k~;)zP}*tLR^N}7rhekDZ#fN6+cp(zae&ikX_*)y$`!}V98#Pp)twz
zl&QaE;hDt!6+r<?-U<@#ZG4@%HBLUs&Cj*8r<k!kHEQX1*u1dZ=Hs@<hwkoP_(?#z
zs%FyJI6kTEI?h@p+#$?K`aE+v>`LtzzqaPJ-}{hbo$S3?cIMB_-S=wk9Md8*$`0#X
zDE;B`J4ZY9R!Z7!#bgobqLLLaV*ZLRvgO(6V<gBS&iL)fg>BAn-mYM=W&GNFL%T>e
zMS5<YmEo2?0sHxL7dZ6wtaolnQM{St|LWC^d(6H~@0xE!D<gSB|9<7UyzDM_JAnj!
zkF;az>vL_{-z2KuSt{)0-pspZ|APJn{tN#J%xymvklS%guE+Jl_Y1`Wy?Z5ZFL){N
zIZ>ZmcH;Vl=8Cr@GWbd?uSowA*d=Twyy!32#+v1Y?8nrvm^^b`&40Ef%lT$(yRFAt
z#aa2bmy<5F^NPA2@xKuAyJ4G>KpN8@JDaB`KQrZ&JN%LPUFZJ2y07@NhKWekbZfcI
z)!uv+-)<KFR1dkmS^iWTzpBP9?F(`*1T`d~^FN>@=5R%5!Qw~XI{F&d^B&-n4*6DS
zWw2PmY=_?0Bg>P1iyb*8dHHSI8#k$@X?MH+OXfd6Y#^P+*HNZ>WcihEh0lJfmT(<e
zeyPl++9#uIy};vs=d#6myI<?LN6kCjws8N)lNS9O<=+0`KXz!J+VA$idbuure|+vd
zJUR2Q(A)Es=KeDFlYN}8Hv4_QxxT~T^3CIw`_^w<UTkC{`{?__(vD2~rVM=tzY})#
zT!-GMKB?31C{x?QyXo{*iHE%cmv1~>w)ya1y9v1^&m`09!(Se-XZz@Qxcx%w1+k9I
z7b>ocH|9X*uRx`(Acsf8;e#(81i01&^*YSp<@|Q!!VTxk44dW(ANv`lRQAfjv{t$H
zkWHL;78k3rPg|;`s22OrsIu!HeNP)yFV<_j=D6m(i-wwSpO4SoZRG*C<QKHtEY)jU
z`!=Kh%aOuhxkbkDUDlBioHE?cswYQ^Z52GZDWGh<YPI9f+66ykJKd6R!Uj`8ZFnO_
zKILTRM{6gX?^!22t?JdmU4OVQDt3ELRu&H4ezs2Ffy>I9)|>brrBApW6`;~<yJnm4
z(R;Ih3Y<Rr=|i>L+GC$U)2uGw<)>T>jY|x!Pdt3sLM1I~#^>IYvNyI-jJYiqHqTWv
z)K!EV_o#hf(K~Qns=fMZLzrX9s+SLP+LSV7rj_X(F)D3c<!J9vyx_SaY<?4zCK@=N
z$b!~8$VNYvnF<;qoVwk$RR|o60*xPpAAAr*RSjCUBf@BDWLUa8Bm!JQL)3yuA&&1d
zl7W!b3<8Z6DhDc)=QwMgj%JdPn{}N(vQ+=`H@RCo-5<r>x_)U}gU|%!V2AuYtDsYt
zLX7RV*J?&63Z8aMV%K3`-dff5E_KC`q6v0R-;LN+FKd2uQCL5}>WHxRRafv7D>y_z
z;iBNhabjZUB<Y1o5D&pg7SL*(4{N3bLDessci{Q_`aB^ap)-Ge*7UD6Z0D0T3SRDK
z8ofR5>$Jc1|9{V)`s|{+{MTce;12vo&@AMdyM|N4p$2SHIIz=Z+l-v2r=~7TKR+++
z-kwUybDw-CKWr#F<n`dj#^h&zzu(`!rSkK$4NuShK2>A?;{o&QI^)Mro}>hC$p}2{
zS^Mo~`r$I&R1sU2S2o)}9E;M|%bPq~=&J0_Ev{=Pa^KxLKf1!%;OCXf)X3Dh?MGiM
zecI$Hf6`>u8+p%LyQXnKeCEQ~X#H--;Xhxm$1n4lX%rYR>xbSU`T9SJzrMZA4lVlj
z=H{}rv$GOc8dP7sSMiv)_>|`I8Pa(k+5LLa+s@3fC_Hp(f9UG4#GQRcoq9}q(<{vn
zK3z6_mqwDR^*bAO@4tJiUu>AncV%Bkta%6bN_RC54=>0FvI5gZrv_Wi#csVuyiz6x
zt=!^fOTDHV*;aiqkT%P)SlMy7o&USpqIN#ntXtdj^Ow%FtKBuzrgGEHFPFUit=t74
zs(yGrzdlN`$Z?xlZqZi-TjzsfdNCPqx98rzCY>MOE%^4<*3ZYZ1!f#D*lvA#o58;P
zo9mWLkCod~mG(2GG%oEW^L3@S9$`NXUCWL)`%W&~`sIJ~NpsuM3g?^m=6s0r+vz%s
z;a|W`$k5-+>_x|_WtIP4?7aH=<z|7^<$AwP_m;Vq=<NUV>9qdr-|zR|FY?+|_BQL^
z&*$>Sg^ye=uZ=eUe7F4m%=31?&&1Szy?QOc$Zp|tyT>n&nB?BN;xohGpicb0J4===
zn>9^0+D)|b{HxQIf2-vtR_{zc&iC5x!oerDi%MTzIeDk_dTiR+S*C60p1fQ>KW|m|
z`n-F!-(xptURFDNZ+3Lv&ZAzxFL~?F<de0Ecv$^@@AW_be!qXV<+9)AM>nJoZ;|mj
zCO5N<S6b>^Z`GfV$ItF4eC*T8CF=A4-`}%KJSU%NWM<cK|Ne6M{B7^{PfcX`*L`8{
znJsEt-F5e!eOvZ5KYqJ!yZ^dUi<v8~6?f|u?YN^X%q_k%>+avP*RDx!eW6&hl>M;p
z%GSd#QVZ&1S46C|zJBlS1BJ`i>+%A#x1av2)RZpKp1l6>!+*P+W@>%^Z@J>u#VBR*
z`~82tKO9^9kZrGW-s6INsY}a@&u?D)?!^A=U1vJiZ}`9L_QM3{HzqY7wy8)R-}{cm
zJ$Cx-(*DBPQ4hXMw^l6tu&h1m->G8e*z)`T>aBO(IXC^I_LW14GXEb<`X8bZz4Y<E
z+T&b1&ooZIz_UxF*-JYrwRCpB*A?BG_xG=xc`uu6_;BaZev3QL+^%e8+?M?yxc*Vm
zZuw%B8FA9`dA}X+PJDfe<<#R3ZBGOv*8j1J*{=5KUc=|*`7*cC3f?}SUSi;P-Q!86
z)vG|B%5}vm7H#<zE580VYxc-lyD$~BeYG$CxyKa?`Sl<FPWf2*_wk(mt=;>*Usnsx
ztX}orHO1n!)<@?C;qNyC=9*?t(+phH5^88R>*j{9+Z&nLqxLeCq{vk~U@U&S^?H=&
ziwh?{O=FPd|6l2Or~cK-<!j`&9_<#7KKz?cZp*E+)0#is&fi~oer3qYN!uIduy4-3
zu6KD?Y4&NxPiM=&zq@<vWy!2AQEjQ+?rG=e*<Le!!}H?IH2wH{MMZCR{J6JEdvV>{
zX|tbp-iYe!DV(`!n(%}<K})`U?$#@}ynFtj;tzY11M`~>UlQy6VjbKsXV>$k`E^g!
zC62xeHAmKZ7Fe`NZC}0Z_R}53=l{(uE!<;0r$ehi>Rj0lnfcP&lY^a_9tR&u-_Wu|
zSat5oWvoi>i$9t(UjHTV+_vYCPw|cz_KELPo@``2cUG7${8hBab*<H})l_%AFSVA9
zSE}Wnvt9Ds;qBF2^9v-8A2feB;qHV72TEo7Tsh)<9&eYAoBzl7(;O!Cv?YbR<(*>p
z9W}jQAbI_z$LAUOiLsGSCML+#?~r|0wL|W>%Rh$axsIowPoHA(qNI*}p8LhQrd+r0
zp7>m6QTl$1^ya_DR@M7{K2MD(PWd!x-6z-rJp<t%kGl1Jf`TS#hOC%yRUqy0^xibH
zwA72v4Tryddw)Ow)Ku;EvL(0o{CL#;Y+m)dja7&K{rjE2dO=B&-mVu)#qV}LpLJ_%
z_UxU-&!axtEccsh6;^S2n(j2=!?JwhyMI2L?K@pB_RQMt_p)Tu{m-pjJ})aMC}_=j
zW@YbbI+@3MB$H>E=jZ9|`;nA=)XHj~5kKSf-D1u!d}nNY`Or*Qa85+_l|^$4vL>yq
z*#6qn?2`9%Kbz{C52jt4GvU@^v5PM&cHZOmzP<F*?q_D5Y6t&bY-_)LhmW1<B=ZdA
zhi`kGqrN{6{IX4D(Z>H=b7l!jw{59xEZpj*o6{vXceO;@XOR!P@+ODm9G)<(KY7x>
z<O02Yw+*(O6}DOa?L@2U#mz=+Pj@u`E@-POl724Q$5LL~f4ETm)BnoDhu7+FJKdQr
z=kW7=O4aE*?;0u<?msMb({n4j9{S+({$Bp=kulGYwyu3}<L}%DWqWoddc5wR=|1<l
zSa`wC82|T{TW0f}_tL)4@kiZVjcGzDC$uR5YHRPa`Sam$SU-<?*>t_wTTx57dZ!e`
zI{j?vJaB2Lx2}5I&BBO?q@+a;?}%B+ZjsL4b5SZR?d=15KItW_v%Vj)shEFrwsZ3O
z-n|tJ>yIu>+w%X<bNglC>*K<9xb;d+Q%+vKS&92ceWz!u-SG>j+6#*Rg=arm<o>Yz
zPs_qc<2zGN)g&w|l<+gqs`)VCv$lnW*;+0hi{RkfkJTayU1d5K=R7~)_hi3o|4|w1
za}UejxaN0ydiczECq8{zV%c?5wcJBfYmK&TwcfHb>u{6m^sACvKe$!|``*n<cmMst
zar<h<M_q?5{xos66rQwffl{2i+G?9Odup~z%Vq3da__VEI-k;=RoA4pN*wNu@ih#2
z%X8mkB3JWzAC{z94R(hOYk!AS2nsX?a6IT-zVpK&?zM6=K0ZDk%^PyAamS}q+Ri84
zGL>Bp$h~rXet6!C{No!r`faD#9cs7D+SdHYI`F=nQN)@E!}l`c*5}OU=I{HamG);(
z!90<(*DW_GaowoTI3vDo^`3W)V$of*l6-|_`|Ng@8VlT)+^hJ0#@ye}7hPw)tDR@}
z;(q&q_uC%S`?&j`TD@a>_T^87JMt<vJXNzk-*B5{&dYSacIB)3r*84S+x0R%?(;_L
z!pBXn_6HSzv+UI?eaL(E@CU=y>1`{8gdbR@Cwb1Br!IF&uiwYdJZ7Ke)t6E2c1xBk
z@Z@{{VCmgxZtOk#dST@)<={;&g`aQU2tT{=vHF>(+pA>l^XK|y-#pV(@gnc@U;q8H
z#E<)#n8#Ny1s$(&W{<tt%tZbEZI{l@usJvT9$!JzqjPtw&-l#w{&367d+)xS-ZeLS
z*Zj*j@BIA1VZHKa#+UA8F*7fveQooVe6*KSV8(|>#>=XvdZoSp$NcP<wcHomB?TqI
z2gFZ&X}4Hrs+Rji`TNXmXJ^kkKCk-fpM6i(^OS0|zCR!G`ggI+{rS7sB!5dUee(a<
z?8wh`#^1m0zZg^TE>iN`V*$3hY5$|P9_yL=Y30xI^Kr#)x5LtYFlXLB_))$l%kJN+
z-8|0=me;+VmG;Fr%zx|8N4w?Ey!dWsquHzE;=uB0I)i%O=cCj3ZNE%-d3kw%sDxRV
zxb7!A({uNkUcHj!xWUIBKI8GLB9q$>9~2n)UE1Dz_-vn?@9d}p^TcoO`}s^-`^3{J
zmP;n^ou8!Yy~ZN``4RJYJq3s8tyxnob9{drTsy=RmU8gN`B$^7cHImVz53Nc`lT&f
z1kZzK!tavPlOwL^7Wk-bHvIp)%~k&K`{$v(agkx{w(5VHYus9krpj-9UVTM&=MSId
zhim!UDsoNR>N;+lo_Tm+)scsr->jegF{(Z%@`_CTr=1c9FH7wErqX{+YU`R#!%W*c
z&vS=X2OG{1To=MGQ+3tu_EVp|C-~B*x6eIS-n}sU`r?18!goFAUgOWrtUOhres;3@
z?woz;Pkk-?WoN$WiupN(E3ACS+&<pz4%@QsvUU3l_HeE%%`&x+n|jvwdQ9Tn{h7DR
zOrD)HGhH2euzNw*48`Vr({?`jjZ#S^%QARhUoqOcD%{xTb@o}QYuo2Y9Q)YWsguT0
z!29)_K=jAg$;+<Pu^zoFyMM-+vdfo$)Vq6$=%>wfcwYB*nQmB~=|{`;0qfRSKJU5g
z+nW`7_QCOa=O$!+-)x=!>O_hC?#bqJ_i@LGaCJC@2tH`ZtFvG3J3H;1^?RG_eXQXt
zA5E?~H#2fW!a>icTg6#^d^pU%)@|)xt!ti~qJm{i--J%~N|}ComTN7u?;IC@k*ekG
z7aK0@u$uXH$*sNmHG<3DelW?qbHncFYlWZBvg5luYaZ`aKbpy~S9tY>3uf=GJYIiY
zc4tNpw{~`-(|?8CyDXaQ=e?`jIzM_&7u%2D_3PqegtWUKs_pC-TQzToX=U=O(=XD0
z2u}XJ|KK48edX0fe%oIcT1d73=FQy4CH&&;nnSyN3(_m^COTgTGwn;cvYh+bn>$au
zd=H7+u@=7DY<T4L9hb-Jo*Aw5kBD{1bN?(-zL4dM%igoQqMZ-O^!XcJU#i>I`1g49
zhbu=O$M7o#$i-abxoVeZdMI|8eA|@T&d>dc>-ZPVx!&y0Y7snl!)J||)<JV~rmy|-
zZ2s}a6;jha*BhtJek}2Rc9Yx8{da#~(J1~t`TQ)$ew);F(tjGRN*^}4d*O%G#5qZ;
zLe?Z`g=yq_C9+P}lf6Cru=}NXSwEgVz8$dnv2pk2{!HnuEI&B1HA*B!Hv9Sbz4-Tj
zFaMg$fA%Y%@1L1aYya+0`IA?T-)96aJ|6w@)is-|S5}8AA6}XjwmwVh+S<8_dYpUD
zKQGIgujps3q`-91z2S4>yy|zBn+qSiWp~_sIz2wmG%LRHsc7~+*6A;OJv@G2&da|k
zUGVSc^V#e7{W^80>~`+O@Rb&Ng5ms&H-<0GySppx?X9i%IDf`e-Ms$l>gwlrpI$In
zw*T|FFBf&TDJMNYd8B5ernu;X7|m*qnfd$5`US-KSH$n%Xa2u}f6MyIU8^V7wz2(P
z>i75`*W1?zD;Pe+Uz>e+*RAWBwru_^#}<C96P6WQ=D4FI$>~sO%q9tMq4NiArtR~%
zcjRqJeWU34V+R@|=0EUwWzVzMpde$8W#WrtpHlukUl_BpA|~@_dU@=;ZdR^)&XF5G
z&8SX#8T3x}gTuWB<w7y5j<kc@E7;fZwa5JFe0<&FY@=W3WUYJcf9@Z&ti7Z6e0xOG
zFU~jhXZHjwzryPrwf2-=|B<9~@rCESVt(iGXjUH)@snvU(l3+zzW9N$^x^mK4tCgB
z7^z)N<(byhH|J?~e^0$>v}tZimzZW!*!ufV>NO7kYjkcuYreVODE9oT46TEeksB&z
zW;V!8mtLQ?Tg=}yY`y7ZyV=to%9QP$e|=|j<?q8Q3zT%@uZkS(KJw#Alvpu~oMWea
zGvAX1Pht!C&d=xYZT0(BAt<@_+B#R`>_;zV^)~KTj`4lk9$#n7Tos#qYPxRnsxYgg
zD$7&;yj{Y)zv{1BzNz(=%*<02ZMGH4q5_P99bQcQc0U&E{QK?p*=@PE*OX7VVXJ3b
z{cTIw39Yz2JIb3ZwGYgjHPblVEj9Mh-T-Dc9s_0fzAbIdZ}0BDUNF_DdeQ#3Gj8zg
z@IU_LwEq4rGQ#I-AN_9nVJA6j?!3b%U#uxuYV|C9XHn{EbM?(N#<Po)<MrolpI3f7
zCief<R?*<OeL~Xjvo~K1jyKzOwr$JY#t5w+_x`$n3-$K$wAuCPq1*ojh4ZdCq|7?0
z8~Pk}7QmM83-{d5xN<7%hV7OtkH^lL%Oy7#W@K+uUeU1a+re$83kBZ(FL`Voy}`Yb
z_wa4GBYdfmVb@cc_N-51Fq@KFzVP~!LidM{MYFeL?)rGVSa#*xV>`It-rm0F)vtwT
z&8nlgw=2Ed=IlQG>WXW-j;1Z!=>M$j@`P=X&mMYqD`rcs@jX0UEWCVa!lern!lEme
z-1M&ZKQlpz$vW`q36*5g{*&|TQ|;!@`^&%X@06Lg`g4RY><O$ry>RCKg3g!iis`9d
zQ%!#JFROj_<4e`eQ&+FWw3N-5o_yZb;`0}?WfdD2J=^Z;%`08BX@=qf^On6c4qy7$
z@cER}-v!qfoL|s+!9U=NwEgMxd+d(Axv|yy^rlOVn!#4NHfFN_lU%!_UjC2xwLP<!
zo$bxmn(RyN$D*8s+~3UetQFsMWL2}0fGNw1JwMK^PdNCdQZO*sL-a_GgyA6`Nuwig
zZf`$7OF3Ep=p0EYlSRDKuRSX|eY{_OdsTqM@zmM1z24fz91qXUwbotUp?^fD>wd+y
z)<18L%h!MD*9!QZ{XXxvzLrmF*1Vtk*_9U>6QgHJ%FO5~7Bcdl!d|#Aru^@Ps+tYe
z33Hdr)bkY;OgfWxKdJK3we{b6zi_foZuc+J-tp_-wc9n1|Lxm<Gw%LhkCw%!CeHl#
z_&nq5Gp-Cl!E=`tEL6F?$<z7pPTei4+F_Bm-QD&j^-8Cm=l;A=I^&w-AM@mMHm4^h
zHlHm^vC!!?OMYi#X(ZM;<L7!YuUVHBQ~xaee`T@Ojc<)RF0rZ4_Gvlh{NEukbNxPB
z&C5llS=Voc9FsSWnmg+%ulOYg@tu}Uor|;0^UQRmicRKioKs!*&sxb=oZ-jM@2$&p
zWafQa8$APbmSp@#1qZ*qtzQ2@Yk?dhI0`21x4mdTgJ)+<k-@Qf5;HkON|*wlo=7P*
zev_EIFY{W0s*zpjvX7oEe@zbE?|k?%euL1hi^<1*k8fMctL`Cw{Brj-U+L)8)_*rn
z=?-=|;#U_l>G<@wcXykwpD_LU(E^V>&rLs-skBdj;&a1cvSZt0iKz><E!wYlzQ~kO
zdTH-7sqdtB^8HrhVjbtm7dtLbTo~-{{_KvQ?3!}vKR;f-uV81Ce+1t40U0Y)_{HgR
zWTNJjDH&g<-RNuUT^e#yD9m7U=g%^!>fc{h=T}aTOk01NPiOz0%GYJ@Ys9T%7rf=%
zqmp&*&;7Mavvx1I7%t^}-a@cQ?3s|o+?;OrBTDxi=Gjzkl0N5iWc}>pCOfa(=MBiS
z4fuBAOY1L3nQBwUD(S8ng=hJr9v*7-+m+_YBg=VhS>5NorZt!R_P;;ixViAPS@z3f
z>vQ*te%x^6F}L0uTD;o)m|WO5TbVh*tA#p!SX>l@nIf)mJg!Np^i|N8-*Yqd`ra*`
zPu`jdeK*R_UT|meJcoH!MkN6+914PJx1O4MW}B?FF~iG_g}NqIYBz80u@|qFo%!Yx
zo7K7RS3k^^&e?Hs;=;2tetR!?($ycwv?uergJogL4S|=OUz;tqO%VRa?r85UxP9;M
zOV_klbf!+KH<Y~3d~sv!t?!Q~JeOsN6wUkE*4uV}VnUDYv>NuaqPFMEnKvphJfEF=
z?_Ympr|P+<Cs>>VGPyJw|GcQwTd?Buo?E-7SN27S3pAb(IPqEk^R&0tYi51f|1c|9
zO881ncF_KHnLHP-<=;zkfBtfsKF6JX#nUq{EtvOJPKwX>c1N6S<Qc&SDu2}%a#?Ep
z{AtnN#~seQlJ9H#r=y(*OLf_sZbQzmODMe5^EvF?`<k|`oB7_ib8f%5a(UrXd#UPV
z<dJx9mQP#U+Z65{H#%|n-TuQRc58brnYEk$_AZ^c^Ne<UmM8SAw}&2oj<ekXAK`X%
zC)1`GDhE_~-#7hhS=VB5MMsqDSelZcsOYz^Yj%d6=LnaSHZGd+G-Gr7rmUr1*Y=3E
z+0QbweUrLWcz@z)v(uH2Ow<0^*uS|R&<0)8p}+(>?Dx|i_qM~a^Qu3~Zq7a*w>t6K
zCQs${!iT@_xA}Hwb;WVl2b{-ZN;S7Xvu00MlaV}k_2KLb>E}=XU`x4QvUBgUbLWeh
z@*i-mcb0c*w0EeC>$wyA;KD=YXA>vt&vva}yTRy@fDhk$uOr-A<$reH=Ba<`ySb)!
z?}DTU*{R=eZr$(xZJK56|KM55`vtDw+nitDc4qTwednaNkl|$z>0l?YK~1mh^;LT%
z;Z0gow7XT4v?ca&C*SUijd0a@oaOa1;D1Nlr{9{7mF)se{M3K{zHeSvclexH(0%UR
zMlrtqwuW<p-PDiW-?`!P(c5Nc?swb#)HxzueAOK~lK!aCVIIHk=hh=_k&c;jeqM`C
z6y#`QnC|(^Ivy#;6qs%bpZL5m^`^L^CTOG|l8qsxf(cVmiqqqzR}kT<zyx7e`Zg4&
zeEKC7IHBHgMY2djU#HTG%>u&PJzJqmpBwK82lV^Alhk5|4qHGMQ8#i3cf3gX&vD&%
zI&|`{u|Tjuaq))reG=;q9SyX56MFV_`<h+TC;mL(@+i)2nc6O|D?$^Lg|}{ZV}<DG
zP-5cS*X%QGJB!SQJ$sv`?tYi5qTv24&TWC(E*BHw6R(%cU5qK83O;q&#es!OgmL=m
z)NgNZr|*^6<q#S(6|{fF8?<@()3#MX5btbsNci{XXWI97cLlXyO;Yvt`T6PT*?G3r
zPW#m6F4&rVeU@eMGl}JwB|rVN0-wru2((XmN2eBK0-aHiqlsbejB|6Xqit0jRnA{m
zIo!@~K2<y13^a&8J*KEr(m2g$-!yeO{kSy|hMx~G^RvWm{h$9{>%o#$x4v6Tm$WNz
z=-ka*(4vrgy#1G?hSsizxABeq+_N`LWz}&oWQ_J=(Oeoh<z3#izTThq=Vw~(E^oVe
zSMr9;z3<<vpWUl|ck=nMIp6oZxBPxPfB)2u4vvSlB3&gfFS*wJc-S5=sm1@!jz(>j
zybI4QuPxxer=@<FZI^0ZLQdaR#ejo{+3%|st~ci`(OR-0;OE(vqO&qHwV1_MKhF0E
ze|1JF|J8LJ>zexWYl>#QGGY6&)1r5h4(EmEO5Pb)eLgcV|NZ@a`s(%jZfUUZ+_|gz
z+nb5g<Le@CZOK#yb;JMsdcFSBo6YA>%`LyDd9+LP^x4q!!KJHXc5a%Y7i;yxN?bQ8
zWL5Zjy;d$!F0Hc*9Gg?Vzq`9B`8XeFN~7k(L3X2*69OxPm-_|0Jgh!xw?p4n)%cJ_
zDkriEU1iR1v|`b+u&ms&V^aLlm8y+??P6j~`)3BM^ZUmC_Eknss)J+z+Z4vs2Wuo3
z7#Y<5Drw$)PN&=W#@6iVDk>@`WXta;UXQQeYm#-vqm5TuZLwSLBzL(=m2PkQ@=ftu
zvqC+3-(FbQye4w9+6$Ap*5zhhr|;MQ*PU&aJBg8*O+q{Rg5;B{9eL#~J7Qv1>g5*h
zj_sKIalyX&Jw2bk+1Gz}{_=9o+%(rekC#hctUk2gj>k{G-{GUNo^tKKOWNn3$@<7h
zU(ajyN!DSp`jiuS`c|%AkIm=i89zRAmU{<;=`8)pIIsKn=5Ku$?woW`vlsuP%=P_u
zZ2p`%i45J-QtzDS+?Ev{TRQcn&%fXA_iK9It9&jiX`I$$uqNy3s$;FMmrjqH^yla2
z(+i#3pLA)jo3O9;_o*$Jm!B-3UpMK;kBY?fvL`1d+R0w}ey{p`&$i8zt3MtUKh4Z<
zlQ2a<Y)i^Xq0DP*W?ncIw9H2`_}iY!&poZF4-d62TjQ<2H>8bEHp}hz&c8n%_op81
z5)Imz<hn9!?W{9%EH6h)GRwcGb9$Pt_saUTHx=T$PQ6OE2wuOme)r|alMC&CzcH?S
zy>@%bqa&T4UM%iEHAOQx<>#lTMQ^uWKb5=vu3EqCw;3lVg$aN1zit?nJ+JDOrlfh^
zoSpnHn!er6-#^npYYMNlnaMSybIH42Eb4CgF7#eGD*9gC@7&Dm>*k*P{OxxBblvD}
z98+h$-~T`E*0$X2Lz^F|&t8{$E3{v_$xVH~PF`l)y}yPUTJsl8uIK%y_-|6&-#jBu
zcb+^o&y&p?>N)>e-I)F(c+nmIQ_O#D_|xtz$UpM-R!4B^g!5Z1`ZXoj`MW4RoHy+Y
z-=|#?mG6&!<9&HEe}(#a#j~oGnF^Oa#yC#d%_lZPeChMx`6oLXkE%wdJ^Ug1^5%2x
z9Xl244_1~(KB<=d)~$4QeUQPT_B)r;m!CZU(>N(1b;*BD$HI<!_DAt8KcZEF<b=yz
zOt-ubT)=hiFZ2Gx_cT89Pp*0LKrQ3>>l^z;o_}Mu_jdWHenhrj^3ndz8mUKj<XYZO
zI3YCMGv|%rmA8s5wk@Bp3ZC;UtoLepU3ca7Vq5mZ`B9Y}yNeDKJ#{d-^_}gN^SaYb
z)>p3nojPURw;6YePHVQzRokJUb;@C0dc$|a|38d``JYsVZDBmsU^F3HGQoY)G+u3g
zn~yCouc)c1&Faf(<q|#f!ZP-}#+;b_6(5s!e!o}UtK|HeLzM4aX+o5CNqF^-hwV@Q
zd_Hgfv!HcT-rZd%pU+=>U({j6l$g4oPd6nTWZL<9-EN=sU(@63CUWcV3AnaCUjOOo
z>E?4Y)YgWqoD{fW$Ne3J$v>aZuTOb??(**_oBDNLZZ(s{qjretMx{)d#wo1k;p5X2
zsIbx_$Z|<Q=fb4TRbR6fcbH{daCo=-z1`2B&*!J!-Bqf_CmMNA*u782@VIvPIuV{Y
zsn%tcpPxP3m49P{BWPao(FeIjL4Tf3kC$?7*Go&(KFk)Y^=`?^7Xs7UCw-8yI`V#j
zbB+0}Xx*h}3!SArHvRUWlihOfaP|wQ1&iP3>2<DS{d91`Dz$mhubd>Sgq}+|7*;SA
zE|l2u@qp?%_gQ}}>bSU0%a!bFKe54aYwDQ^hXRt+lyh_E$g>0p7Ia$wO!m>yRyjHG
z=Uy-8pI83et(|z>^~CGcD_^-=;&qST)t~+OpZ&WPN@{zf+Y-6OO%v~a^4|2?eac6n
zMgDF#HlJ8NO+}sG{AA>Mmo33D&m9|IwU@*%_WkZr^rt-FM1k;*x({NTBbDyhd+e(@
zDP;b{?aJ@iEv4eGzFK^e;n?CJ>Aa_s?Qp(T<&wa>P1<~4cZ;XiG0**2_vSoH)yaEb
zWA~QY<W7FOE_MFhsMx5ZB9p84MXF!EF8^s|z(=NupRO<Neez<Z%F6%G?mxTJ)Gp<5
z^U%djInNDumX#!Ie%-_V%G2lMpH06#nqofvvsb%&Y~Q}(X@07zr=R@@-jsKrQ_`}2
zyUwk$E6a0i1K5hX8k2r1KYf2(>ZIYF?_c+P-@92aca_*w|Gag2kEgwzZYON+-7a<X
z$>IMA4J@B-_2e2Q-OEca*&M$=#_{LQYT3>9Sr_FbKfDghlnoBC+q<>R|Bkcz<dnz%
zCT}V^9$1-v&hKWm&c)|@<TRL1IXp8i`0*j}=C<6~AuF32gE_2czu)(J-OL~71HaY$
z{512!@Av!T?f!glPOg2ph(Y9>XX5j#=X@V}*n|r%z7YH3>-G5Qa<)|x-8_e1*txVm
z|M`5rc;B(#+`2Y0md{tzzrD5fY-U-_%eF6vxb;uGxVU(!=VY~I$#++W>o0y|Flnvz
zTK*FA;Ex6ZJoBfm?v*x|TFv=<=MV1n(G!(iTmA&iTw?S6PO%erY?ae#*{QNoe>6f@
zhnc=yVX-~qqLbZEmSZd@?v`)%mp}HX=({7kMdT;fI7QoecT%FPYF^|&^7~?=!&9Jc
z-z-1f#U)nJc79CWqa(btn%>TzG~<tb&R*tO>*fjtF6!MFRAZ8{XU>$|iVQKY_4~F+
z9WXPhkhS`2e<&(2w$D|>Vc&nJi51e%7aBOU@#?(|G~x?CCaA-+{3N61jP<>DK3D9q
z6gGdMbY=C;%#AfnG4h`8_u2Yf_;7GyK-9gawJ%a%-n^bH+9e&r|2)s=`N|VJpU&9P
z9jmvuO5PyxFk7yp<d!2ME_UY=7@L+aT;bCe`ZDtUPpyo{IvT>}-s_i_NvG!gSRlRN
zOGVzb2S0qR`T6fNo|27VIdzTW<<;ko`L+S<I~l!}<Uc<;=gXgz-LA)2ml+AA^Jtk*
zX?x1;dDe*4&%5d(YiqC2Vc`n#`D*nGo41wx4!l{X;kat*DgC+GQzv{Zzcxu--~RuW
z9a0N;p184TiJfo%p4>27dg|L-3LlPKm@U1m?e>(Td(K}vDBSsM=7Y_JT85EQbK^bU
zWrc+;OLlmWWaF;|Dj2u&6g-)eG_^5ua~h|vVePLko`Fdf|7NCNINwyD*DI<e8P=H?
z@%q}@)c<w<_cOk|xmon;ist4D2J6j7uB;4hUHmh~?B>g3$F>~rlQou2eU>>@qOJVZ
zm6JP`Se{PZ5u9`Ps?y|BUMp&f4z@UW@rcH1P1}B_$h%AEaFAThh0EK$bw7Tw)v^ui
ze0Vc#ZPZMUEobHiC&es{`5Zh&!ffC3$;KB0@60;X>^&=?-m^>Su<+hr=Z?S6D!Eg6
zPFXK9V~T{F>zwD6H}1Uiytnzzl1aBe|NrOY{>N=fFq_yc{)h8S#l6mU_w8sb6JM%f
zX!Cm7)(h+X-hZ$P-u2}B6zA=?qnCTQ&lPyqG%4*!sbbX$30ccO!mmt@A5gWmT`u%6
zNZg_7d#(4KiT#I*guBgZR1dSScibzr{0L9m{hm)AC(mpQ-u}~f#dnb_A4?OIzCE`*
z$9AWeOJbHcbADccSKQo<cbAv(c>DkRd^1bq+B^x?MKPcDZxem`j{CINd+*aHub8qw
zT`zn5lJ~mjhx!}&GKAct+!!A>+SD=!?969T{huG9v)09Z$CF*VRNp?#yj>aHcY;~i
zY)MOatLf*tuanLs9L$c@Iz3f>{gjrJ@>7*}1KFlDo=I&e7STvOeMna>so(Pd5zP>>
zjJXS!9_^ByyZv}f-F2?mgBn}7ScJ4>|Frc?T^qf9(f((AH)cLFI$cug^}5=yBzEzI
zy`D!8JFKWF((_hGv=cekUC1?ke_xl^{@c^8{hc`T^X@sNr;h%fv7m3=_We6Hrp=JO
z`?yAYy7q-V-7F=47R!3>-Rt4*;o|auU$a-@h5-NBm?id&Mf;A5=>@%0^|*O^;;r)Q
zPrd#!f480dZKC$0FWX#qTq;`G;S|faK#se{sN|%|mio2+Gi)WyUTE$(&)ep-RYb?P
z_4TcgCl|SwFBFhGIpcoM=MQO5@7DY+U9x75J-7Z+?*q5_HTrGp+Nzi{y<bTiytUZ*
z$j2ydoB0*})`>5#yb3Vfxy$dC?6xa2=Is^k%&U~sGcz`qF;c#>-CXmrylemA`Hh(w
zN@^!hu03G(KzV`bg!HA^hp+T;N?%-8WvCgd@z>ruJ=t8wA~VL;-{JB4z+DYnRs<cX
zjQT#6F;!u*q(<PPmXjCH1#a+u_p5%%Brhgiov??VtEIM0&a&QkzxMmwnRCu_{_-oh
z*Ao0DrT%}BWP3Mbz{}2_FBPs9mv4T)^Q(N@-^X*kk1zc6GDK^x&CME%>Bltp#Aoab
z=4@h|o^5G$z}N8l<2_Su$n&h~K3C{-YU2ql;pdATMS~96)huTFX1M=o@0WCilb_<3
zXf^Frdz)~#l0h#h-zoCL-U;aw-riI#Og`6Y9i`;;?YI(qzog3v3k^=ujUETCHB|0?
zQdh85=IefecUiR$;@b8JecB+zvtROQ;^$4b|3*tWbnX<X*~?VEUHagCZz0w0w=cJB
zt8|+9@XxfJv-wU}x-NS9Y1Pr7TbEe#l3We;KiFQ%9(<_j=Fgg!al*ap(zKq-Fv}c0
zz^rqfp|nwyL3DQMfz1x{K3Xnunm6Hz*){Q~#5E;fcTH{m6jP9QX8neMpjFen?k~zc
zeQ~YP0-iaN%wZ2VOq;gMclNBF9kZqezu9Q}-bOrQ{_2T=EA|v@)#6^4Rr<Ks{LtAu
z-}UFqX8JvN-cr2FH8OFpMdsY%<V!;5-Y>Ya_Vo1?T#_79{ok*OTRzeK=v3_^fr_?u
ze=1H@u9I6Xq{6J%m_0@0(48qK3%YlSz6-poa`3OP)v1;B(<ZK4sCG{C3~#6OA9-PY
z#fRcYI*%M~>inOskoW$3;))ALcr&|y8vfn5z@dI}a8c4Zo>^{g1$sV~o?ZW4+xU0N
z<-X<PKCPjlX1df}UZYrG`kbb(tY2kIwqG!MH0`m)It696`z=3>!<Ej4b7Xgzq;3>o
z5_dl_<9^Sl5Blbs7n5a+Pd_zEx*)nEt=vOtZPbL6a^}sCW?O#v`>}TQ&v~aq-R~zX
zGG4m+@YFMXF1L#hhE=?F<PV=}@Lgec>)aPPtl`^EJ~EoT$>RT+XW8=>@3Y=u&$5C)
zggK678*`=8H^-kd8d;6KM0zHDi(qI7lA1O{J7MBO8;M<hZA#I5&L3sH`9G#5$hUVj
z&yz~F6$w*xB7@c#Ie2>>?~^@yq^@py>-`y(Z?|55a#DT1iCBMvdRp;PL%ri?JRgFl
zJx>2BDzXq~cwjTbLEbMVrti!4{QLLTG<>@n9zWB@Z=Ox&v=qPY94+gRo;k(Ym%6I;
zTIXxd|0h%U)pA|JF<0>)554sEp2>c-zUSBS%gOr8w!i(DBg+>bu4Pm^RL}V|g{SKE
z?1DYot&7=z{o#ARDf)u8HfwA8oKE$3e`Y)`+I!@i=T?>-7w3MPp5i{^@=|T9`|KL&
znxEeEhhAQGc++Q%HO3(+LW1Ag>dl_ru&<x4{Jw$n4^wPo%qfAi&#BhRO1l@nE#0{2
zR`~@kH-+}E8*08V?8z%$XJ}DxxNF7Yl=t_N9=wx$lf09~Jm&Ee@dZjB^gijRcY5|8
z_L^+CU-D@q&%w~Fo$D4^2hRT<keBUQ^uJBl^!v#JN^kD#ct)7&Hn^~IoSy19IsEu(
z!8IGMEKDnWsO8`K+4TVHvK5gldz3FP^q5lq<<|?%_g}eHKi)j*E6ZuIyQaynwy(xr
zEyM1Q(zN4+<_oMHjE|@vaCewDvGDXaPj?&PId|+czdG*R7w7r#@882+%tb=$rY!VW
zQXcWR!fBhm_`;Cj$6Cyl^BBwAR_-X^<GOpu!s!25^ZPSWi+}n`tV?j4uBX?1Ca^+p
zX7!^ZoR{<J!}Xs%<yowBX>+>&VvdWty`QyqtSO56oVMeTq<z^*z8$f(J@1mXux-h_
zd~Ay3iqB`>UkvEfjM$JcZ@=8kn+*X!c#f=I>;3-T-d<_R{pN??GsG4gWc~E-_xstb
ziaO8D_TIg<rE%NABb-z8{M*02VSeg*EJ0=eCJ{Xm{c^jas`QQJ@?}@{PxNP-K3(|5
zmEu;t<zL?2K2WLg{mx<bi`(_Ze~Ykt%uOuFtX{mxMfJeWRqw+l|2u5%HZ!+h-9l@=
z)OGuMw^vucrwN;1=uh9e{>qo0%bU-O|4<ED8h&5B(4nH`{)U6<Jx3n9l}EgLU43Jo
z(e1d}_s6Gg_@u03{PgL8V_rc!4$KpI=f)$WrMmj9$wZ?^+n)zZJ}<1=$^W>@R(ayj
zQ(j+}Nq@Y!#6>5XcltKw=iWK{&u%L^m%B~dC~rmals_}>KhiI2HTqv5S6p}cyf*)d
zwY{fLn3j5bX!bZ2t1f>(?fvnTe;2HBt{U<;B<UJIbErQj%2x5Xlyw*Lm3Nj8?ZsyB
zK3U=7JUQX%pZCv%SZ(?>r+a;ye*c>6B<AD7OMSN{9g|94JLPfW!8Kxwx+R_;4_K3W
zY`>0e7^w13Zt#};%sf3N^3AO5brT;Q?LNJHeqGkI<4c#et#jRW_*78ky>qRF*P7(*
zRMuSB`||br{br{)-M{Q?aEQ|LG2?yhR4~bMs@i``%dfAle%73=z2IYV->X+Cl3(to
z9eMt^-(Ig}zlC_%gH5Mnie`)LTI}BM_3%*ZS@TprIU5Z_LqoNfyykZv6vt+q{4B1u
zxbdCz|BA%gId1GXmY;33vwJD5c%(VbKj*XO!@z`^rlKbg_U$`ZJ-5QnVduH!^BQ;X
zh@=U#&Tj8LpPOU9Yx0s^Q;r-<`pC+;;)ubna-M0sZz}KZuh1#kcKM0U*5uwt&psUJ
zylkCgWB5*9YtuZ{^#6}b4IghebrLKSd7l$?OeL~8|I~^(#XMWwyI1Zg_|mlOlAJ@I
zmSISM-vKTm*_U3CUNWU#6Z!5vSF&t4bnfcWeerb%A`<8ATeRqk_4)~Gj@IrqIdb)f
z*7OtVxqCjJzxK}e>q)<rE$=j#PdO|zy09%bdZG7C>HHs6vkO93NttH-&@fC7Dkv%0
z64|Sxta@ywx%=P0Qzf2P?)KMay!-gaLf@%dBsr#?cd+D<GV!>zHCz49I)SMYyFOjK
zb@=tGm^jOTDBfVUbE<5cVkai5cNd*^O<n32d7i_d|K;s^<uKL7I`>6)C~r-89eA@v
zblR*?og6il|IMoD;qQ6lBsCk~XY8pf70rxVXmBTQ($mMS`@~#=cl>LyW|rCXilK&W
z$+^6<55v~qeZRB+_tv!vs@!hNt}Kgeb=ncnJ@fI?-OTplCHjl+JPCbwyjrwQSmFr3
z{X9!9`8z!=-nVLW&hDwGUbj%S<5spvs=|At58rO*pYD+~PI-B0Y2`A(KXRP*tA2bs
zt^c~FS7%RFNXm{QrqA^XIrE|tUz`&c7Z)$s`9^0=SK3;hCs}NZHtjBpF<fK4@9`ng
z6KZ#(uWP7zeh^$2qqJW#^qJA=!p}j^lYU<BpSwjps#<K?uVc3xKleYj`L`}Q_i<I2
zf|b*%!&loD-z#yI?s$LkgYrzRQx16v59V50r#|t$dWJpv_F<;Y1+N7c-?^^y_@n)O
z2G?|@XPvB*>KFW2f2%kqYr1YnWo6#X35`mZSEh0PTo<i7IrQbBz@U{Cb({Bp{-7z@
z@bqNp6vkAAYKaA|ZFy01j!%j`6ne_7M<Oxi>bWI8dJ+ftO4mlsD?c&w$-+#dw1)d1
zAHFJ{<9_r#&qukH@%#7PSi0NzoMW;E^K&-lo9j*-Ih^}u-TLdNHow)IFwM|)DVN=`
z^wiS_Cx5!MC3jQB?Lg_2ZS1n^(%vkwy|>C{ilCwJths{xmX9vRws$j`95~LT8FQ%8
z)~b2SgPw*(N6yXfGMW%AF{4TJ*t{Q^lWi)0{tQ|eVYhQj(ZwmNe2;&66k_n^Np1D~
z+q%k^-cDMVy4hTATC97G=og3L>{$o2JRiPF>t0}@+o&xyb)`e0ugl*+wkeF}cecEk
zD<dW(?ahBXM)Tigmd%SMC6?-LZd%}T%Gi2cs#<91=98;=W=t=d+SpzEByI7cK+b(X
zSrc_{3e~^b!WR*zYPz-xG^l?2KTGM=O<UHv9eZf`HD*fl)=f&&9PC(gYlHuITGklt
z2^T#5<ayKkO*!@FjP}hoX){{DqrrTt!Ds<biW&3GyorTo4U6~xUzK{qv-6!cv)jV&
z+gEAmg5{ygG?@4Cr2I~)zyIi~2V^W9IwTIKJ|s7+ORaOPTEWv|%v`m<{qxnU+cY6|
zXfX4!T~T_h;9NeH5jK<#)u)heu%OH5-VVPgZ!{SP$pV7~T|Lb_cSAu#?@;w%s&UW3
z1}&q?$5xrT7zQ2yN%0w#gk!k!6C1PTFWH%U94B52LP#F!V601xu%9#gtN4`F9<LfI
zuYIoZoBkj$rH%KBs`uu{W-spg$IrQWusCh%!t<_ahBNEze#I;~-#_oHF3*eqDc@o`
z-~5<gvh!}0<>p^SyA&d0+n%lGS+Q)*cTnKM0}mXOPb}F&CY~&w?xVd17W!ZX6Qm_8
z{yf{sDxYn=*ClUFw8b^g=O<bohh8;<hu<lOWyTwFZW=}It=jtMx&8l>U-q0=Io!m`
z?e+fN-lt!$$J<YS!g%?~HZ!(0DSEtbZWdiqKfYadkLcCgcQ=1M5>Y<Mr>khEl!(ro
zTNOV%J_#1OevvR-{@H!!mS2^dqHZobQFCEg_m>sdEm~E6Ezh&Ainu1%GyUW1aL+sY
zy;V-W-dq*y!~64}*Lm+n@8ykCK3uBQeEG(2v(eL(i)ZhieYpFy*h%Hh^;7-CkDPdt
zUHN~>se_Z%Q_mj!Ym|STuQKzZ%+1n25vNX?Znsx3?V3Kty8dtAv2~I2gw|P~aws!!
zc>eH7Ty1Ta)zm2}ulbLjKJ}y4sN`kM%+1Bor+)h1j#n<fwo1RIEUU(j=_B8=Ih(E5
zo8~M3ofVtKr~f~1&6!&pL!BzW#pTxR{ImXM>ZiYLrbUOBaGFKm)q<qGhDLw;zaecr
zl1e8}o}47TtRZAwOr+Ozy}2u|t8L$E7%V-#b>`QszuAkQENOWc;hq$*#HB`4Q1If(
zlbrQxyBGDj%jh!juQ=RvGV&GY)RWg0Zi~zm`uTbGD)j}6x23$-J1H-zYP!&6%i0K@
z6*}Lq=BnuK|LfDIs`krdWA$7%M$wt?N~M+^*}VCr*AxAOLws{=6tf?FpFQ1Ys?yOX
zi$0$-e-XBOU(Dyz7MFJ>ADMLTho<DqKBEcFJUTb_9@fxU$|W)-O4ZAA?xdwp+g5FT
za@Do|#O2TFzpixE&nv&4r*U3?##`ZY+mp>TtipokcR!eKTdSs1y(^UMlNz)4biJv^
zdZoQDE^>W(VxqF7*&^3&v7j9Vi8HNAv)T%sI|W2VRj<cYuU!+fGbn6LgrHYYuQU_$
z>aew3ir;ohi!wLFR4b&mvj2bVlXz@y#k`a1PWvRxc3+OaXQTgkoB!#Xu6vIw=5JcE
zPVu_ZceN8`XE*%*62IhUi~rJ-=JP!+^535jy{tyP-EZy`wGA6=>H|+-p0O%=Q%_9H
zwmtq8`#Lywu62u@Z}9x!nYYnClGA(Sm7lupSKn$cui5?W&6K2{pH6#3w9T03q|NtK
zWs0_Fs)D-Y0-^KI7pc7qwG)4T@RY~G(x*ZikN!Ee9@WzN7IWMybZOGWE9I%Dq;E?r
z&Pad1H?{j|gcH*|&ijn4(QSEC8B-O?jSGb0I1YN%@fe(&W$Jye=JVNv$t<$(r$y&Y
z<P=up*#1yyZS?kOhRMffJd3G#*b1I(JU7?6_4j0T|7q9bs(oKwU9J9p|NniGGo(y1
zCR|w)8T{$#>2#6P=jK|UZsV2q`Q_ZsH?t(Z{_oZ!$Bua&?Go*Es{H%)`ssDCyMr`-
ze>^VlyN8LD%cE0BbxH7Yzfa%q*Ds%we{avuJzuXyYaZto*PHU~?QQRkNk^CX&9(Ye
z|Nnb^(W@&f7o6s~a%{?(&lmi1R-fGa@Y&bX&AMuFZEw%Stu1I&*c;{d>f+|3OP<<B
z``O*xeDp}Nh56R>+nt6s?H~GQwEdoP<Mi??J-LP-^IL3Rb2|PC6V6Y&ad>U|w5^GE
zzU7$ijNcUPHp^gp&F$4sk0|cdpMU4jkzB*IyNuV}?awuRxo)1W`ajk=4mUqnZJkrI
z-S+zS!&e@0$g{q2zw3W=+ngO-x7puR-d$!i;cb^s*2RL=2M?OR4m<h(?BBC2-&ftg
zQfZZXKeIOR!PU@J-ZPt8gU+RI+5GDP<IZ2t^PjM+v6Xq57rwE}(<$6!*GrvyyNws?
ze5v_iaDmZxCR+(})Bes)8~<+cvg1*$J~fS3i1D>|&R(_UQ{%QqaPIu`$II^F-E8v@
zmfyll=BQt;`nIm2bW1?V7M+$y>lg88FsGk)FnH&F@JOO>Q)l^QUvtmHZM;u!Y)rPB
z%0FdVqTszo+xaIH<ycP57MocxIcjT`=gOd^KATslOwkVa6EgU)x#DBe!b_L-*Vh-v
zY|RQ?9P{P%_47wSTZEoGo1JeKwtk*%^{g+R5)P@ms^9MoU*<bot^7`5drZ~KrJoM-
z+n*5jw@G|-Vb;;to=e*+OJ85}O*=nNH$1*}YhiA(ik9AN^L(?9&(F?Q*Nxt0avZd<
z@yh!6_<I$Pd4ImI|DS!1JveQF*{ma#Z`2RH)obUAJFQY!+n#Hb_w{}C_8FHnZ@&G<
zu(%<+;QYPj-TU=_HR!4w{K_?(z4@${O#IJ;-I>cKOH7<{NA6(3|H5jHBaA%Ov4Vz=
z?nX|3pD3k0QSW*aM`5+qX1$+gH{Z^h`^N9tiKN3R;Z4V5Pq-Zu;|ldJX>wn9Wo8A(
z?!(V>cpvq5<(hspbL+OBW)X3xi2IF)zQX<J*hf*E<;MhVqy2ZaJP!?$JQ3)6EG+rV
zs`{Qufg)m)Ke2M(PPjaAvG%R{#M+HvBJ<wxZI?QAe)hGQxd!%gq(2`%W~!zYTdtu#
zdGeEMEAO+<$xcvu;csE&psL3AAVtg1;U`B1?-MJ@oi!_0PTm`z%lfwVeaMII$2S*h
z-@4cm{ONbu`T4VY=ij^b|E4vg_nFh&&rjX1*Z0w%IWL%PN~4Lyj=$e-r=Fao`st#(
z{K-YG-7R91WUb3igvZyaKA-<SJN9WyVVRx(91F$h?Rln)|Gtq4*4zEYX!7~R{dQW1
z+juAIMsGW@`~ALbtKIwm|JxlHB4qetW8q`B%7?AuK~rSPV%quT^F&1Y9@PK&czjd(
zdAZ9U@9wYP-y>~)uIJz8$7eQaUR@n-o_dq>jm^J*zu%|c-&b4o<wam}?u#267k8eP
z-_|PD5gu3RdTniV`ln?}AMX48&bZL6S4vfX@0Uq8(~r-sDbRC|6OX%`y>90*ljJuy
zHh#L`%)itm^VgS`POneRGR=OnYW2EFA0Ho|y;FPt-*2-Yc3xlTJzcM@dy31Ag-=_!
z&OSKU-0O6Cfg|(7uS@^@d_MnB_Mca)*Pm)&WIprt`I0&<|JT>on-|Wp_qY4GBye5D
z{fkydtPJjMzh9SqYD3{=ZR5RWx4u-SzGmAS5Md`}_x;qi!p*z)yzlc^$I<$1`imsh
zUA*%zSLWQgbMC#<%1EBpg>9LWwX1Bld#d}FJ``iF%Uzh^uybP7-rIiq_6ZHrzb2MA
z9KU%aSw{Q)`Bx^q{12zw^cA@F${B7y{r%gV|I^kbZMe7RQP%|i_VX`4efSVLcfX9?
z^HbK5<|4oBh3`LI^+xQvV)(ung@>QTST6KPy#6*I?OvZxk^8Ub<?H01cPUN|o?){6
z?+w9~ZGq2L9qn2kGx=_SomyU;{`Jtcn%9#U8$P}cKV7-uNDON?&+~%;h6_v8B5oGk
zFt)T>wqkkDjr+Yz4R1V=(zlT~u-o@orQ`gg){I(atc`&O+7Iw498X>Ry>9KRz45uN
z`=WL`-8#JL#@4Fb%8Y9vma}FkY`?ZbnRU^klrN`J6(hC(&fm))zGRoqjZY=3c{G?0
z8V798i_N^Wq?6Hl;hPuXt3pohh$yt<itXo<x6}E4zkYv@jOC>YR?s$84RM*m{Vx`E
zKbhpMXSmMe%<-3IE~)9Krf5Es5oOm>J3GsC^7DDser50P<;Kj)y1p*=*@+9HnMXPV
zm&HsoPd_Ik?3cPU=jNtEeLF&DNo=qA`Kjn2tGGmKvut)*S)|-r#**}`%gcH%n0C#5
z6Sg+W6SP6~$F|(tJ%@yag_Y%NJ~Y0pDgIRX@zK2hk<Y3h9ALcs>_Oz2YQs#`g;$TB
zD!*T=zQxP+@S(rw?f*+GEj-x7>a@Be7&Pm>V_Ra$+eJzq3vWxbuAWplPjIT|J@dU)
zUyUA~lh-<2{QR8hHK|?PQ5(5bbZtYrG+$25yxe-)Ol#4D+1qpj((D>fU%%7h%>KSR
z*Knnxj)Az7VP?zc&ywnSW$j-J%%hoXo4d|%Oy6;Phf>Vng(Z95`q|vfJHI~o+CjBo
zFC`l;{>yVZzvS7j)%x5xcZF%pd7EMxAGdW;9P3RU+-hwtlfPlkBQ|UOlaxblJEs2a
z{MZsHBx?EV?@Zb0b3A80oTC;Vk>XJx5)yn$u*bwA^~$Q$XBVOp^VUcDFG}G0Uv!o$
z`Do#>vl;iN6<%52({m~HQDxbp&7v};rym*p3_EwtY;J0$Ow*RHOAi^bJmC46b)fq|
znL>ES-uV6lryfmdo)P@*ZQIo#6|4DoLU`ZXcGTLr9o{9mYRR<6zu&aZ>OU6FHl;Cy
z?a|}MPak#bn|&6s@Ne?|78LkEqi&&7Ys!NIjc%Pg-|c$cviQT*RiVOVB`Z1Zmd*aC
z=XtWH*f;CS3Pt1eb0%Iqx~e6)i!OaWz$~}DQp@C@Q)}~~O07G~Cmm*GkTT6m(Qym0
z)A;}I_xg=ud;fmB{Ry;{y3f@8S>?R1i=MWA&YV~GD^vLXzYT|DmwJgVX1S>3C~myV
ztZ>qQ<$jx_MHiJEp7x&dvEY7w(xSCEJ!9@-Cy#LZUEP5jzlv*B`4#T^^)U{#lk?1-
z8PDg}$F1O)t{HqRhJD?vXChJ0B#v2{Zjp+-P}+OwSA^$!A8B=&ZF&<Q`-yM3F>Uhu
z=F``?ZbaoBT)Rtj+vI~gGME3}W}0MkXU{GBlAl-F0*X$#yFQ)&s%Upzxw`l7B`GhN
zR-d@FBU1KU-jVZj1<o2R^jLem=uEe6LgP=N-_GAu)!cYz-~D;~MGb4#!L4rsr|h(i
zoFreQuBLY^X4dc8*Q?#--YT|NYIN~M9zEphqia^_zA$&{3%AYhj}*QP+Bs|Eu_a|U
zY`FS*#m`LpoUBo{#CnFwOd0m`Q)e!on;YG?E46T@NpV_qwb4QyzQUWoE}dV@zd>ff
z+<%b^m?mhe?EQUJOQ%LIEAi|8)2G5k?5vHq<@2q4_tLgnF?Gqrsf?)y+8g=p{{-x4
zl1Tmi?d_*W-7@NemoGlQ!+LJ(g45ztx&2P0wSRu#*nj=N)bO~>N1xh-_ggBsp5}9Z
z`ZCO=n|-ZXh>(SSlNaA}mmjC&>;I-U+5P`h+}YhN-1qHrYVWDe{iz3*rOI&W+JKfl
zy==Iw<^1x(!sd+zGnoGbY)rI!v*5C5Z<4Ozvj;N23iTR&TAQv<=+b=89JQ??H-FE^
zV+$l6-ig(obKj}lbe-QZSC$nm7q0enEel>#<P>gqPyOElr780)PW$M`$yq-)wt8Nq
zZqK28+9@I}>cG<1mb<1uw3xryH}K~JvuD#5?O6WXxiRANyB_UrPLF@^7T($!`|g~O
zV*20MrlO0i*A@yF#ViPUINPXJXF<%`mZHaX<&T!OCm2f2m{79nYRApF?F-j!pQa%Z
z${9O7c8Br1l564L6FE-usuUO%tE|zv-6u26xo+|KlG>j#_bV!QJZv@kvPpYM;mpq|
zPaA8E_)lNG_U&9L=Q7@_54T-oU2PROP5y?ok5I4hJf48gms}<6Q4C*7ds)+%ce+^|
zN)X>4(f9jqeBIq$7j5+C%=tg3e5?Jra?iiNKl=;*u-KcfU%<s1$~L7jL1KaR#Nans
zp(S5CJ39@RgEo~(T9vFYo^^A(UcAkM-2!*pG`3{jDL!wT#$-NKv1wEJ`?$w-UqB=L
z$-kYo9ZpVCJ-bJ$tbkKXu7r_$$&U+*oVdB4*8l&!<JIjwJDyhVS{k?_AkcBM(5b9U
zGhd6`lf9yG*;@K@p42~|JFiZCs`NQgE?0~EHm=iuetvG9{DE`VkBd#uV|Mmb7TJYd
zT@~8<$>r&dYwdYyYnZ1hM4djD+F08Bj(2MH_E~pcEu0qIr|xkn_JU+|QbsXzn8a$6
zoV4e<@6W#2W3=t*lKO=={))@=DZ~dP)HVP7!u#jou>__w$?2ybHb{o69J2Z{Z^vZ$
z*44N5%3EHSdrmrLacqw4bi-<=g_$q>%I>ps`#7bzT-kK^xUkfMd94@BBZ{{@ZIAvX
z6Pj`(a`$pS<%jM+G@7UV$=vqOe(}RNH`ise%R|+yrQXWxuT8GA`P|)iT{1OOY+BmC
z<nULg&DWYp$!+7=;(mI?jvl5L7vAjN!p~F`<@NZtTmH5j{j;Y-4NfcU7fpC%YcKcv
z=B)aq`Io()^1a`l>{O}ORQs)H7PoG^<cZZh7kbZVzR3t$QBv^9zEw85tZZ$;I<Dyt
zpJrI?zu~tq^$6E~-ot&GpP&7aEX&F~xsdyk`uv(CwbI)=-kd0UYIJV(f~^a3Z*NOu
z)!9`WxbWxG>G6KQ%Xr+MW->l{VHbOEj`sY(gA1djo<9xv6FWO+=fizVqZL}i1V81A
z+NbYc`DSAMuF9?1*Uv?)Sz&X@>A0nFh|i0>8508nmwGG3GOwKSY2Jp<=`Xgu(2u#<
z<867r;4p8R9`Ab9sSA}{yCk?LZe8<p`DgoY`HM>fzw}R^-ETSn-ecE!QKvo1j}&{Z
zTe5M+XCvWtQF$xoMHrs>$|cgae6sM}zXF$Mn11|kA=mjj*?R6uL&^W|=dD$H+uHi9
z;?9|K+-=KWtywuUnV*YuvoB}2M^}luqjRCVn_BW?<)2-l)!(ix+{*gl#NG#fMdB{^
zb{Agm$&6Zm##$!q?E9`)t*k5e6?9j}BpLB~7Zenq+_Iv>X?efFk0&+nWK(u4eSUOw
z?UO(a)h!1ucqQ2<om`scUJ;)8X+KZ*w>OjOzrCHvw)FHPxx&-SSJ%4l`tvwGe(SO`
zyI0QUwBujB;An-oz=EU4>#UF2_sY#T=K0e#O=zdrzaxKVbqMZm3cMJ^KJD0x6CTBP
zzV}OR`q^=O?cx207Ki^!7LjZ_e{W;xHQBE3{f~-k)XvZIXR87YXF3`m;8V3;AL3&1
za&}(Sl+@GHW?E|g*~adE?PXk=`*q8W^MyD4tQ9IN)AVa?6#lw$!-doti{F&QCP~ie
zKinU6=~q(g@j2#>M?2T0FLv)Ydl0n$O-wN(llH`l`w<IIONHNEv?wK9?y-km?2?G`
zb!SAoEI#w8#^&{&e!r)HbyNDh%40g&GA9-tt27KzsE~b`bgWYU$(-xczcU&ZK61J2
zQeP)=y{(Xu@tpsoOXp%f_8fZcwe(n2mCu6j?c9t7Q!V4n{oXy_vrK#44yPAk$86sI
zk)FNZ_qERC#DBSGy1UMN*f}@eA-F?C(ro+XDT4euYc`Zr)+}B0j;WgMd}iqkb@@E&
z+KT2V*YcAw{#SNb#?Ct~m&oF!xm7s1%g4Jj@yJ7C>&vXVu4fY~&m}+qw_7-S-jR>W
z@%P`<{n>6iS?j3p`#mS8JpRgCSzUJgti}CLAJ0wplC_o#+~s0-{hj0fGMf$qQG3wj
z$V$HYx+Z1&=mimfIA5P&S>yKYVAuYjRbu_3CpF4W*Y=t5XBM_fOrN#bU1EyTM@Bn_
zGETS6nlX2C-kn<u9uF*hDARe|B~vt2!Q9}0`D4dByN`ZMzkKHR#p}TZzavEV$Ny{n
z{dmIC&mR=8$7+U_+VS(m2=4cP_W0++y>?uyr$#=smG@|>{KmBOvO%hYB*$DvEiReH
z<wgeu-yQ3fmM+&ROM9twOm&i_;=Faq_J6-j-q=)7$Mrm@VC%+ZzO(0rX{<FY;e9+u
zQtzqH(L-fMJ7x5zDrs+-7jW?*SH_y-=k5RJglT4TpXO=x)(@Wf=#SVgrNDy^JM5;v
zU8;20(zeI@X@{Qo7K07WvTGu;`NHo!TEt>o_2omME4SJFJ7WFUkJfEJdQJDjA0fTg
zGgpu9ST+0a4b{iRM-7TjRVxMTD!*6h&Y}IsjCXBWN$BDr?`#=S5$$t3{r0%^+yC3a
z_}ar;MpRyV&VCV}nT@wHKA(7f`^H4^i+eWSyUAzFpVnrq^7~u<&ttC9XOC?WSz@kU
zzL4`uGTYgGSx;9^+T=TNXXUBy3T-DM(vnJUia!5+Naok|8pSzAX<zp{Mf|L~>-Ezr
zZ>jB-wMG+OOKN=l80~m{qWnpfIhSe<EkEbH`F`##f2Rn}r#Clef;%ti#~IW!f@f~L
z-xs;NZ0*l;U30Y$pVPEnxaH2Bs@H4N=6mt(XLfFF2@|V-*z$k(yItOGJd#OnKP*rA
zFLZtWZufh$-{(p~Dt@R|`F+@a-)3QJ+x@u@x4UU<*>b_QxAG!yuU_yU4rS}hX7yU0
zA?tQDg}Ei2er|L&WXp~u$?3UeQPYf0DaV`(?tHLEY<a*+&d?WqZmC8h8&B)CPjY*#
z#aua&v7a;Nh65kp>J(Gvy}y^vklDOTW43*az?~n)r@s~D{;aH8ectt3%)T?VdO!3o
z|F*4OcV}mqSHS<tC9fCW7M#ja*I}ThcC04-sO9yeo-%i<Zg?(bI*>ZGS#TFypyMKL
z;i+=VRn-Ej`py(HGDc-uINDz@I?#B@kXM&I>?fCqI`^SXkAJo%Dy=v@ll821`KBeU
z9Ft_Pf4-v_8eJvDd^2sL<=Pb90NBLEp&5;jkN3~+{Qc5&;xw+PC?~<Fq8C{rYXTg(
z!aR$fojmynv<`3q&yzH^kclVHhP%IB2ALt4pf0&VFRXIYr-$*GYZ#syinr7U7jJ3J
z`gSVt*Vckm1s;h7*^v?^&a)Tru$*qV_x_;K(vO>37<cGQWn7i+`E1ro=tK`cbJoO<
z3!V9=K<8f?jnf&J|NeQjsjl?r8O<wG!1EI<XB}KF7@97E>OrP<wlIdK6rF6nkOiKx
z02Q}jEuh)h4?HWBgl)p5f)SG{P-}`fnOB{7GKqWPRd`51Wu`Pfki2m6-@)RghUXR8
zlA?XQ#0p=x@7eTc*6usX&&n<`PStrH$qm<asGo81i6h%)tni+ec6h6-;digM66wt6
zX9l<2UV2@-c!rO@c<uKk$NT05!$N#Y;}?kw6EA$Zaj_WYQ22z*1Z~LxSGDjsv9(i>
z{RmdK<amRY(aVjCpTisq4KT)3g>a(@%9H=LpMO~-C7HK9n(z4hNncMtN<Fk8bW`<d
z!}%7fr=Cm?Kem11i4`0_^NwWAG)dcQ`To$F$>Hi-rcQb;I^q9a3unGJb`vM9`>(FM
zq-brD(eHNMPd69T2OoMJ6A~p?{X6OA&X*lLJ8H|kc7W!&;SL4|x5K>z1$SK|>2R4T
zyr&=YetT=9chu|IbhYWzC%fO<<MBzf<4fX)O{=w~RNESL*Y8t!D)U8tF@xa(x5bk*
zAG&(EcrTshdbV(GxA*dv;^kq%{(H98AOGF*x)CD?p76329Z^bNDJk=Nxr+Kyy%}Mf
zJv}QQ?e$)A=4Y$Qvbe&?y%qa=wRG2cg~pwV3-HJ?)t*{Z_jOV2u}?vJ134o<yyly{
z>|E>8qprogEkCEOj0pIaXPwuVZKj#JO<VkpY4l&q<q|I%uKnYeVC`YA@vY|w0L_J_
zD)1XkkS|@wenB@;wty?ZvRope{kcto`Gi{=t;%n$*R^$*JQ3L4WB)vRn%o3g>Hb*{
zoLm<N`vqq%{In_eZsO06i%r$`JT+a}byjQrq2RlT6IT`&N(%SCOBT^Go3bKf`jIQT
zdL^mRoU9&C=OiacMai(=;mTpQV~S!1Wxj%EJZGGO%H}O&KcRH6`rTHB=bN-vY}kJ0
z_oh`0`TPEE`1&U^HRi(J4yQh^CBmjt{^Xd2Zv3oWAQ8c%!*0R#gUjKm+HRE@-&muk
zCw$^+)vmUmP@l9U-j(aYjJd4U|BUy4zFFxJE_ChjJOQq~vW453tQ)%<nhzK=q$(L4
zeKYrOzkUC-^|2>@r7x)CaNX0r!)))}qJ%=hg00H(WxtkL_1$!no*%81SKMglV5bmK
zxNYhF_(j%*Ij2`VE!SJk*Szz2v+i$ymp@83PNWOyul~%x(o0bB=($HHo*x&PcIhei
zj3Bc=vtNBnojR>@^7hkBn$~B2rsbaYJg6ksd}-Fw-@U2JlfEtrTChQVjoh7+LVl{J
z`IT+E{A1TA2?wulae3Z9Wo<V9>4_Ep_OF_g%=6)?3EvL372InWR@^9H`5gJ6#=%uG
zfbCol<9Tk2f?HqCvHBH0Tlj7N*}U^M%X@;C_3c_JKV5c%eM9g|_sI#Llg)S=RUN7i
z%<P+MQg*J5u`<wTw)X$u58Ng^+9PlMXvUn#!g-q$Z*KK<a(tt)Y}b;Mm`|TOHc4i%
zu3>w|_+-MPLf?;X=D%HU-*2}6^M+#4169TL5*sTEH!QUN!0<uV;poHY_hQn@W_tWP
z{)=C@+r53`QXwmj2nLqbXDmLuY%)<*f8T1NJ@sJc37&KF-@aUP&VI?Sr!QE$YL{~A
zC|8{NFhf*tp8u*tb9*NL{Qon#U*=BrC6ks@TvMk#N!fm8{-)SR-{**&Te~py=_EhV
zC*L_|KRvQh<>r=G3YVr&$l~=n(E4V^6s`Dxumh!UW)+wjGcX7hdAc};+^?)w)ijLE
zihoh4zWBFP>eE9q8;@-ep1Zc_xNP}`J!^E#C4aL2b~<rw+w#C0H7)Uzj_ni|^h<fx
zZQr}dWCqWgJEonp&+M@`HrBu9`}9sq!~2Fa`=-}9&T~mS<$Yq!4)epyH_CEe_?w_p
zpje>)!D@rd2_Md>^8+5IvxcAhHvOPubi*_qF45U@gzEG97Aj3&8Z7z$M^Ba92VR4A
z_6dITe5d^jDtxdvRA2}9gS`<fJNPSjIl|nOm8P1XVn6Vg;e9fn**-=Vp>^Mml!V0{
zJa?L5exrM%qJy8;g6t=2{N0|<E!-)e^(Wv``jm3F=f;cc{(a?rX`!^_#!Q)=g|Ca!
zitM+~S>dnRw&{hr^@Ikw^y8m<S)blm$f6m%aKe?0m>|A4R(X%M?)k>F`Dy=L|HCz}
zs?@*h&)@IwC+9z@G=g(2hlr}*zx6k7EWWTNVuErPPku+v(Hk58*q^>+v~p&8_Uv<;
z_B`wRvSr^F#%atohL@Cr83RtLvAvq|`p_h!&;L$dERQm~IPH4(%0{+7yi2a$d}w^)
zb1uX33%Rf2@(;M*sC&VB_6vJsr$erS?#n*m?grrlQ3)CW3>z+qSO+&cAJA-g-@2|r
zREuLi)1T%fxss(TxmdK{=xmO4-OG8-%>GlwUi0;WH}-M8VGUzkW6nNPL)J#RQXqQT
z{fpHy8JBq)FEbQ5Z0~sAIICf*+zmy2^~Pj|pIwYho`x)&l6SEx#CtdVXE^AQRxG;W
zO!S_{Km5DR0$5s7OV&?wmno{UHjq_N*JdcHv)>@}V6Tfeqo}v`%e&bjJTufS7ku#l
zWBH)A;k(u8A7TrPj=es*<H@Y2h4bGZ*m;22L1AV40nY=93Cth#CdhMy&Zy(xAXy>#
zpmf=lPdR7yex6=#Kjo}We#$@lb|rnKPh}6z1qihYe6K!oc7^TUy@fw&Ch%)jUu0Nn
zAi)0NujA2N^ABnVe(O(|bN2J$KQroh3M2~d8C#f4y0<w-#?X!NH{-qiHOqh8_m_Oy
zuX0m)&%<J$U-wO%f5qD0O*rs&bIQB((Y~>HFEZyduL*b-8rC)Cgt|XpcHV_8y7y(1
z)P95?`gCeaX!i;8dsYD@Z~n{>{ruDKc=S>0mHYoKO<fvwSZLCW2|6Y2!ItWh3Cn+`
zHOv+G_i)2wVeSz2A4bpQTf2-F@TABxhsfQS+2y&oqlP1&W6C${^BXqZ+mLnR9{Uy+
zHU@v@-UfR{Y4+WkpESPNG2Ym@Lwp8<K~FT-={Y<BhA;U`m~Ti*@@`N*^#9cThS?3}
z4Eu~fG5@Q7$Ke(1^@!*8fj;lFC)ck8Ug9`zS8mR<#{6_rt>%--d&D0)GCY)jAG1K_
zgNT7Z2A>b(q6R+((aEL<Y#rhkuzuirps&Yba95V$azLjQR}JqLCZkDbo}G%kxBp(V
z^hQ<tHT+YV%JlxT&6#v@@2SswmhJHuW3pv9<$u7N;r8im;q3RAHGVdHd1qR5)+SP&
zYyIiX4$KX6jOR-;w7q3H#;_+moMF%Ly9|%R82&8fVtU5>CgeL?MeZ)<`ON9cQzzW8
zTmO5HWa!~-c^xx9?fbdj{A`=R<#|qj?l(#?eCBkGcoxoQBc@R3_O?<e^B(&&<~{s%
zOurbK8BawYU_RinK>GvpfueP+e_FrV&wk$iGnVy3)#rc3_lzyhe27e&o6i`>kjEOv
zRC7FrDZ*2~URphRTTMcd&!dNS(>7&Ee%oJF+-mYZ#<6SqIsWj?8J}}!oZ8xbdZn<Z
z(dXM|?Y^$5_LO`$M?%Xge&4>BYyWBw?3i$IYfq)zt@=x6niCg^Za5Rue|!1kx1mcN
z7HUN9P1&z9XG?Ra-OX2yKa4NP`KG9+h!hwX@PL{Qjc<}4$cs#s{BNs1!QVQeefulp
z4XN&55)(KQRE4aW8zdR58$`<P@G-1sn$B3=pn72T0hNXzu?0d0_!OtAEV4F`T2RCG
zhxN)M$BrV?NAq**&J|r})wuuP|DEd*#Y;kEyJkG&YN!dw*!hZmgL9PN57rlKGE5rV
z8BEX2d+)C<W5)iD)$ZfGq~mX`A0#DgGuN5nn^y2tCtZiH;_hjNoY$rdsa6wMKJ1F%
z{1*4L_R|#eK*t<=mU+xKGM5Ite3SLffUP^X>iey_e4FQO*B5ga+>yS)bUW=pS%Ss~
z69euCe`nA8a{OeoL;8V#-wpZW7F#h`uQ}+&@A9Pd9KQ+MjXd)YrVa~@#Cm$%RqmD7
z_~|pAV!q7~w>%)#Tu6kei(!iki}spqx39kMZC?B3sWWY7oMPC(z2Yphhlqi|gIdR#
z+;`Yhu6{oJJ!wHT+d)aY(|g>1_VhDX=&CWVXUJJT^~<O2g_}~nlZtyU@aBlywL~t=
z5`4^+rv6iZ=0@*hd$*rjK2Lm`y~g+bH(X|wTg4ySzx$Gg^&IQqy@{U2d`C^fEb5<W
z#9ZbQ?>2L^d=i$ndhzW!sSm<M{8xaw4$q7a*yq)Aiae8l?$h^NLGZuN+6|X?G9@g(
zsA*`S!FYpjn(cSdvN+aR422BrW-DIo;l09qiy<uM`83%=?KfZQxW8;=RLn6}wlBN2
z=0U%{>&^{{Arf2+Pdd0f-JImon8P{~YsykMP8l62+jnE-Zh-{rXUzW&_35_$iAb!y
z$;cmS)U2I8b9Lbc1_k?&BR3?M*YMrR`sVasQ9ta_UhCjohHX5X&oi!ft@=JoIr5aA
zrQN(Dl}((#x5^wp`0mrJkMkA;WiFjjzT9%#LJgx(!TB3DiCgzGH@;(+R=;2-*cj6A
z!{+1unWYkZGAvG0H+dZ3tI+V`IsJom#xdFT6PDk8>nIRi_U9*$)%$X_*C*xI2dsS0
zP++i7?gE#Gx6$LDp6}-^d~~XMT35(|^h$|CKkM#r{NX&q<i_mHp#Ab`{^OL>W=hds
zoC_x`X7`_YL;Q+HlG^9b5BfVpIBK81kKFzA$3>SlF=D#D>owB-))nb2-*e}6*fHU}
z7r)LGYm0xX3<fv49VAbvu)Xn~y7-RY%6?|+uJSXLN=Is+J^WvPlyAa*1D*Zx3uHHZ
z?tlMt^T#9W+f1?>uQP}nFiNmQY|Qo8z<6)%*~9u4hYz1Q(cd+D>g+dt<){7L*(9h5
z<UHSF@Ms>xrAfaFl2{L%`yZBIU?9t(a;AGyT-`hg$^U6=;s2KQd}X`Soh$l4<v@1b
zvO|44_Ahw;P<hXK4avLN^CI@`KVHj!{)dKqkQ{@UwE^>Q=Y~=T4WafE(<>bL&NJIF
zJBi+#AM&On?|}9JJB5&kwU>k<<g=WPJYR4^n$L&Fus`A5_xTf_>pS>1crm=+yG3Qj
zpG#iZhcE9=H>#Re@1j=IZo`$sF2_`H{tiE^_!7Js)D^FIy#L34@g;6Ib{{@`c4>H+
ziP)6CGj6;Oamw-0Te~Oam|t<ke<7`hD?g{~$bWvcV1CkC1D)B^q(vdi7aM(&6Ly*}
zzjb4i@uC0Iycpaw)_yo|EwS-b<MX5MJP!(b{r|t(TQKa(#6-(C*BNWhexJvA_RWeS
zlPj8Y_p<*_s^&iM>4snxO9%5ehOnI9RyP^mZ(Dr7a6|jdiSnXTTkkQ&ANj_nAoYOb
zb#%j93;#oP2AiZN&SLg+{op6_x#|kD97{wT_XEoWodbP^3W;%3PTNX<j+WZL#mD9~
z2X~se@s3aW?TgQPHiS;9J62a#%44+a`k&XcW*$F1LGpd~KL>ezyQAAgHt=$|t~jIj
zA<1}K<CL8NpR8~1$l(`VC73?@qU4hJc9T8x=X|%`{F5(&D};R=%RII`W($r7Tk;-U
za5wt=^W(`I)e|mmwh{Ok#dx?Oy3y-^Uc>w581+EQz{SU{{k=EexTvo$tRx}%`IWs>
z#KU&O>TNuK&dvM!S4=Sk+UGW!%lPl%S2iz(<c5h3j0d>>Z&_Z&|6}<-TLZz0Q_|1w
zeBhrf8`sB_$25ttZ`v>KLY51zZF7Hcyr1?zb4_=!-@^y84XL>d$C!B=7Baj(YOhlE
zPxCs%WkxMeeTFG+46jdbd84v%@%gG!<|yXO{xc6#q=bCxBtN>uRWa^o40*AS^+wq8
zB14(u-3(km-*DP6`Y`@^TFVl{Uc<RzaVWP0gTcF^hRSfZ8wV}<?yv@YRMwx$uzUma
zcIF=p6%q+GS{-}r3LP{iNyhw~zFnAc&$}?gzjy4LzFRELp5V5rr1_+>>e}kuHp|(j
z_W8=uWlQ!m+++E%XCKcT7Bi-qH{<MPhcf=oNi@IQ^8Ml6dv|`w?T~t~^^fcifdwgP
z(KF(!-ah>Mm$QI>1KSMd5{4f(rR;A^_w)SbZQx?KA3a6NL2>i+Uz2B^_s?3lvRcyi
z^q#cuj5XUgI-j%J+?Nx*PI%uR(E4h~8rsc?4U2ca@i_A}EothE6q$Oq4VTmYpW#m0
zZ+GCFhk~b*g7yA49X>2KCd%>$989bW6H8K;{~-NATY&iv!y1M^PbFD+Smi)Xj)ec5
z8{*CsiyXN4w4AZ8Z!b&0$HckzOh4N12_`I`JNJNnLw4i*hJ>Tn=H5>Dbh<-*nm3;m
zbB_IMFFOtQhUJa!l@F#G-I(-U$?QPBHS2MHhUv_E`a=HQQ2cG*@JU91_YBJkMbSOs
z3}s=z8P9!_J^FF#0lS9l{mMPAjqeV`DJT~{n;IE&(<t{|@?OUF4S#RW`*NGDnD@`5
zB_AH}ls`T&&6?}?%e&d<O3Lmp^L;(z;W-PP^IV7D8m_;+*1@~sdKuG!=UQS3a{_L?
z*5UW`7q;8~)sLIkRw2MrTxoV-+?V5KQWZQI>?%xC&g5`^KE1-$UxDv_X-)YVwq1<P
z4d)wm4+J#4=h0#d>G~lf_~c*Gfo+p-Z?3JI;ObR(XZejkJO9f#oV!#tt@`1etOS`2
zr*_Y6fBT<7``jOiXZ1OcQ-5ANDwGuMv&Fn}ns|ktxIxmpscrAyJV|(SV`E;8i1F>y
zXV00XxklEg6+Nm-b&W16o_DG^Z=Y3qjCtRuFlIl7*L$OLoTmNp2$a0RvBCKDl$RC>
z>2qb9KRKvu;634?B(RWc19QUPKTDg;6LyzJ3q}94KUUqp^C3q;;(=p{60L9jyd%OE
ze3GzV`=_FBuJW{_=K|d(@e96UI?nLPul0k^Qzn%=%mqvCX}<Xr!c_5Y!*`a)@0k9q
zpZI-Cru^$;CSgUhw4?jy++izTpPKsjocpsqZ8d#!gihH$_n9vJy?Bvsjax;oIjh1F
zi9@HV&Idm1Vyci`@nVT;jeW|s&yRJKXQb~G5t_nQ#>_sqv-tA9vV!8vS5GwUTA@^&
zw&d8h87&uCx7*35yTx;?j0up~y`gyJBA%8*4Z#yRRb#`BE<JSk;=3*PB$Er@{S@Ht
zcdY4~v5ol}(?g|Gq3`xiGF%b0<4*CzD`L-OCySo%H?F@M&Q>JO9FkHL-n+#NJkmGE
zl)1$6_p>#{C+|vLPzu#34>!Di#c$`SIo^VPHf_PtM$cchW$Kw^uY`|yR(3XOe-27L
zzhT|Kyx8;^XDuiC>^wE+v<#B+SC<5riHFTz3K_ixkJ~jEE#PS}W?pro1!YA%*bN%Y
ziKiO0j1sF7w?;w*V3dP}@q#WNi}={w*=Qql0?R-HuA|lrw?NC?ylC;_&$nw~E7#%a
m4wfW9DT$z<fDH40{ykyA=?0U0%NQ6K7(8A5T-G@yGywo56FJfV

diff --git a/docs/datamodel_transformations.png b/docs/datamodel_transformations.png
deleted file mode 100644
index 7898b158421f07d6e6e1b2d714185c968f848e59..0000000000000000000000000000000000000000
Binary files a/docs/datamodel_transformations.png and /dev/null differ
z{uAR*Wxd*H6M5=~d8Yr9);3YjW%`FdJ~(-=!Af<@(n^g40nIN0oXksquDY|2e}=h6
z$ZC1X^NxZc&(v$pcpB$EOx}<lb|QC8*qJRxvPX?xhkibk44Pcr9(F13_+Qfv(@e#U
z*Tuhaue>RBW({-mL5FH%i4NU(-R-~adUSLxwodWeB+9Oqml`Pso+;vDmsvM^_l(3_
z{HLDtw}0n~b$BHD;mJMzd9B-0{%)Smb<;TL8{5_CXD5gKzjx@8{4}pWv%hpESRZS$
zT_U;g!W@;Zg~6ZSM2gN@>0zMz{<g%kbZy<u&olYogzjKkw#q;@nEB@80{I-ir@v<Y
z-7lMv`9p0E_wlCJxg4wZoX@+y>+M6aTkS2j&sXrWpNhX?)|FN$)4{y=M&s)n-%Srr
z-f-}cT(tKl@nZgp^JnEwKQho<p?~1(>gvktleWuCJcuddiEtNu;c@NCw<mewk)O(!
zwKp7e_T|3aWV?a=A?pv(FO2O>ya9RPJLmc)DsA{~ZhU3Z!IJy}`8($)ANc&@>jpa=
z)_cEi^U9R{k*VXaJ;>b<c`!GJ_4WbF2X^&rhaKKtwfT9<IQ=fWKHHnWk_Ok?9v`Uf
zf)r^BPBr$<s9!ead+{Y_@6XRC>M`A1y@Wkv+TW&?>e2pBrWI=BD(>LnYjtv;{Nc!7
zy*vEojfdM#%{$xvz~Jes4^kS;$FF|ijJS4Si-rdC5&hXgXY1G8w@H0qctEq-dV0gb
zqYBxv{0EX2{A&LF)?ndp1D0hBTdi-TO~1?Me^70M>WynoY-{QdEH@C-YtwY(W&3{C
zDki;DV+$AO>n+pMB==e~T)C1EP`>}Hz4W~{?t==IS{nLNacsBkgm`v9bI_HOjMWbI
z>#mq;URHhZ^Hj~+lO-u)dlz+jo)1|a(7R{l3F8-~ZfrKUt*g1G-dH_r&oQIxvzNNn
zt=0Lyo}=LD({J0NRv%9N+`RWxmZVGff$aP(9^unxueDPC3@Pm|RI)!?+GSWEcw@2A
z30b~CnT!f{8EwcKwYP-{dR)0-niZ_U?H9poWx*Yi0p8yWUg%Ar(t^Z25f05SeDI$!
Zb?zj!AC{NZ7#J8BJYD@<);T3K0RZce7Rmqs

diff --git a/docs/introduction.md b/docs/introduction.md
index 2d297945d7..e1b6d17fb8 100644
--- a/docs/introduction.md
+++ b/docs/introduction.md
@@ -216,9 +216,6 @@ There are three categories of metadata:
 Those sets of metadata along with the actual raw and archive data are often transformed,
 passed, stored, etc. by the various nomad modules.
 
-.. figure:: datamodel_metadataflow.png
-   :alt: nomad's metadata flow
-
 ### Implementation
 The different entities have often multiple implementations for different storage systems.
 For example, aspects of calculations are stored in files (raw files, calc metadata, archive data),
diff --git a/examples/domain.py b/examples/domain.py
index 0454da98da..5342cb684a 100644
--- a/examples/domain.py
+++ b/examples/domain.py
@@ -1,5 +1,5 @@
 from nomad import datamodel
 
-print(datamodel.CalcWithMetadata(domain='DFT', calc_id='test').__class__.__name__)
-print(datamodel.CalcWithMetadata(calc_id='test').__class__.__name__)
-print(datamodel.CalcWithMetadata(domain='EMS', calc_id='test').__class__.__name__)
+print(datamodel.EntryMetadata(domain='DFT', calc_id='test').__class__.__name__)
+print(datamodel.EntryMetadata(calc_id='test').__class__.__name__)
+print(datamodel.EntryMetadata(domain='EMS', calc_id='test').__class__.__name__)
diff --git a/gui/src/components/metaInfoBrowser/MetaInfoBrowser.js b/gui/src/components/metaInfoBrowser/MetaInfoBrowser.js
index 73ae7513b9..f6b61bafed 100644
--- a/gui/src/components/metaInfoBrowser/MetaInfoBrowser.js
+++ b/gui/src/components/metaInfoBrowser/MetaInfoBrowser.js
@@ -89,7 +89,7 @@ class MetaInfoBrowser extends Component {
 
   update(pkg) {
     this.props.api.getInfo().then(info => {
-      const domain = info.domains.find(domain => domain.name === 'dft')  // TODO deal with domains
+      const domain = info.domains.find(domain => domain.name === 'dft') // TODO deal with domains
       this.props.api.getMetaInfo(pkg || domain.metainfo.all_package).then(metainfos => {
         const metainfoName = this.props.metainfo || domain.metainfo.root_sections[0]
         const definition = metainfos.get(metainfoName)
@@ -108,7 +108,7 @@ class MetaInfoBrowser extends Component {
 
   init() {
     this.props.api.getInfo().then(info => {
-      const domain = info.domains.find(domain => domain.name === 'dft')  // TODO deal with domains
+      const domain = info.domains.find(domain => domain.name === 'dft') // TODO deal with domains
       this.props.api.getMetaInfo(domain.metainfo.all_package).then(metainfos => {
         const metainfoName = this.props.metainfo || domain.metainfo.root_sections[0]
         const definition = metainfos.get(metainfoName)
diff --git a/nomad/app/__init__.py b/nomad/app/__init__.py
index 0a8d0f71fb..7b1d3a3ee5 100644
--- a/nomad/app/__init__.py
+++ b/nomad/app/__init__.py
@@ -12,11 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 This module comprises the nomad@FAIRDI APIs. Currently there is NOMAD's official api, and
 we will soon add the optimade api. The app module also serves documentation, gui, and
 alive.
-"""
+'''
 from flask import Flask, Blueprint, jsonify, url_for, abort, request
 from flask_restplus import Api
 from flask_cors import CORS
@@ -36,11 +36,11 @@ from . import common
 
 @property  # type: ignore
 def specs_url(self):
-    """
+    '''
     Fixes issue where swagger-ui makes a call to swagger.json over HTTP.
     This can ONLY be used on servers that actually use HTTPS.  On servers that use HTTP,
     this code should not be used at all.
-    """
+    '''
     return url_for(self.endpoint('specs'), _external=True, _scheme='https')
 
 
@@ -49,7 +49,7 @@ if config.services.https:
 
 
 app = Flask(__name__)
-""" The Flask app that serves all APIs. """
+''' The Flask app that serves all APIs. '''
 
 app.config.APPLICATION_ROOT = common.base_path  # type: ignore
 app.config.RESTPLUS_MASK_HEADER = False  # type: ignore
@@ -105,7 +105,7 @@ def handle(error: Exception):
 
 @app.route('/alive')
 def alive():
-    """ Simple endpoint to utilize kubernetes liveness/readiness probing. """
+    ''' Simple endpoint to utilize kubernetes liveness/readiness probing. '''
     return "I am, alive!"
 
 
diff --git a/nomad/app/api/__init__.py b/nomad/app/api/__init__.py
index e154b7e29f..de4ed2fde4 100644
--- a/nomad/app/api/__init__.py
+++ b/nomad/app/api/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 The official NOMAD API.
 
 There is a separate documentation for the API endpoints from a client perspective.
@@ -22,7 +22,7 @@ There is a separate documentation for the API endpoints from a client perspectiv
 .. automodule:: nomad.app.api.upload
 .. automodule:: nomad.app.api.repo
 .. automodule:: nomad.app.api.archive
-"""
+'''
 
 from .api import blueprint
 from . import info, auth, upload, repo, archive, raw, mirror, dataset
diff --git a/nomad/app/api/api.py b/nomad/app/api/api.py
index 9a949d7039..45df901c6c 100644
--- a/nomad/app/api/api.py
+++ b/nomad/app/api/api.py
@@ -23,7 +23,7 @@ api = Api(
     version='1.0', title='NOMAD API',
     description='Official NOMAD API',
     validate=True)
-""" Provides the flask restplus api instance for the regular NOMAD api"""
+''' Provides the flask restplus api instance for the regular NOMAD api'''
 
 # For some unknown reason it is necessary for each flask_restplus api to have a handler.
 # Otherwise the global app error handler won't be called.
diff --git a/nomad/app/api/archive.py b/nomad/app/api/archive.py
index 5c635f7e76..4c9f66dd50 100644
--- a/nomad/app/api/archive.py
+++ b/nomad/app/api/archive.py
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 The archive API of the nomad@FAIRDI APIs. This API is about serving processed
 (parsed and normalized) calculation data in nomad's *meta-info* format.
-"""
+'''
 
 from typing import Dict, Any
 from io import BytesIO
@@ -51,11 +51,11 @@ class ArchiveCalcLogResource(Resource):
     @api.response(200, 'Archive data send', headers={'Content-Type': 'application/plain'})
     @authenticate(signature_token=True)
     def get(self, upload_id, calc_id):
-        """
+        '''
         Get calculation processing log.
 
         Calcs are referenced via *upload_id*, *calc_id* pairs.
-        """
+        '''
         archive_id = '%s/%s' % (upload_id, calc_id)
 
         upload_files = UploadFiles.get(
@@ -85,11 +85,11 @@ class ArchiveCalcResource(Resource):
     @api.response(200, 'Archive data send')
     @authenticate(signature_token=True)
     def get(self, upload_id, calc_id):
-        """
+        '''
         Get calculation data in archive form.
 
         Calcs are referenced via *upload_id*, *calc_id* pairs.
-        """
+        '''
         archive_id = '%s/%s' % (upload_id, calc_id)
 
         upload_file = UploadFiles.get(
@@ -128,7 +128,7 @@ class ArchiveDownloadResource(Resource):
     @api.response(200, 'File(s) send', headers={'Content-Type': 'application/zip'})
     @authenticate(signature_token=True)
     def get(self):
-        """
+        '''
         Get calculation data in archive form from all query results.
 
         See ``/repo`` endpoint for documentation on the search
@@ -138,7 +138,7 @@ class ArchiveDownloadResource(Resource):
         any files that the user is not authorized to access.
 
         The zip file will contain a ``manifest.json`` with the repository meta data.
-        """
+        '''
         try:
             args = _archive_download_parser.parse_args()
             compress = args.get('compress', False)
@@ -229,7 +229,7 @@ class ArchiveQueryResource(Resource):
     @api.marshal_with(_archive_query_model, skip_none=True, code=200, description='Search results sent')
     @authenticate()
     def post(self):
-        """
+        '''
         Post a query schema and return it filled with archive data.
 
         See ``/repo`` endpoint for documentation on the search
@@ -237,7 +237,7 @@ class ArchiveQueryResource(Resource):
 
         The actual data are in the results; supplementary python and curl code to
         execute the search is in the python and curl fields.
-        """
+        '''
         try:
             data_in = request.get_json()
             scroll = data_in.get('scroll', None)
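For illustration, a client request against this endpoint could look as follows. This is a minimal sketch; everything besides the ``scroll`` field read above (the URL and the query field names) is an assumption, not taken from this patch:

    import requests  # hypothetical client-side usage

    response = requests.post(
        'http://localhost/api/archive/query',                 # assumed URL
        json={'scroll': False, 'query': {'atoms': ['Si']}})   # field names assumed
    data = response.json()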
@@ -323,9 +323,9 @@ class MetainfoResource(Resource):
     @api.response(404, 'The metainfo does not exist')
     @api.response(200, 'Metainfo data send')
     def get(self, metainfo_package_name):
-        """
+        '''
         Get a metainfo definition file.
-        """
+        '''
         try:
             return load_metainfo(metainfo_package_name), 200
         except FileNotFoundError:
@@ -345,7 +345,7 @@ metainfo_main_path = os.path.dirname(os.path.abspath(nomad_meta_info.__file__))
 def load_metainfo(
         package_name_or_dependency: str, dependency_source: str = None,
         loaded_packages: Dict[str, Any] = None) -> Dict[str, Any]:
-    """
+    '''
     Loads the given metainfo package and all its dependencies. Returns a dict with
     all loaded package_names and respective packages.
 
@@ -354,7 +354,7 @@ def load_metainfo(
         dependency_source: The path of the metainfo that uses this function to load a relative dependency.
         loaded_packages: Give a dict and the function will add freshly loaded packages
             to it and return it.
-    """
+    '''
     if loaded_packages is None:
         loaded_packages = {}
 
diff --git a/nomad/app/api/auth.py b/nomad/app/api/auth.py
index 0ba9876d9a..61b1fbc239 100644
--- a/nomad/app/api/auth.py
+++ b/nomad/app/api/auth.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 The API is protected with *keycloak* and *OpenIDConnect*. All API endpoints that require
 or support authentication accept OIDC bearer tokens via HTTP header (``Authentication``).
 These tokens can be acquired from the NOMAD keycloak server or through the ``/auth`` endpoint
@@ -29,7 +29,7 @@ decorator.
 To allow authentication with signed urls, use this decorator:
 
 .. autofunction:: with_signature_token
-"""
+'''
 from flask import g, request
 from flask_restplus import abort, Resource, fields
 import functools
@@ -69,11 +69,11 @@ api.authorizations = {
 
 
 def _verify_upload_token(token) -> str:
-    """
+    '''
     Verifies the upload token generated with :func:`generate_upload_token`.
 
     Returns: The user UUID or None if the token could not be verified.
-    """
+    '''
     payload, signature = token.split('.')
     payload = utils.base64_decode(payload)
     signature = utils.base64_decode(signature)
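The verification above implies a two-part token: a base64 payload carrying the user id and a base64 signature, joined by a dot. A minimal sketch of the assumed layout (the signing itself happens in ``generate_upload_token``, which is not part of this hunk):

    # assumed layout: '<base64(payload)>.<base64(signature)>'
    token = 'cGF5bG9hZA.c2lnbmF0dXJl'  # hypothetical value
    payload_b64, signature_b64 = token.split('.')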
@@ -92,7 +92,7 @@ def _verify_upload_token(token) -> str:
 def authenticate(
         basic: bool = False, upload_token: bool = False, signature_token: bool = False,
         required: bool = False, admin_only: bool = False):
-    """
+    '''
     A decorator to protect API endpoints with authentication. Uses keycloak access
     token to authenticate users. Other methods might apply. Will abort with 401
     if necessary.
@@ -103,7 +103,7 @@ def authenticate(
         signature_token: Also allow signed urls
         required: Authentication is required
         admin_only: Only the admin user is allowed to use the endpoint.
-    """
+    '''
     methods = ['OpenIDConnect Bearer Token']
     if basic:
         methods.append('HTTP Basic Authentication')
@@ -192,7 +192,7 @@ class AuthResource(Resource):
     @api.marshal_with(auth_model, skip_none=True, code=200, description='Auth info send')
     @authenticate(required=True, basic=True)
     def get(self):
-        """
+        '''
         Provides authentication information. This endpoint requires authentication.
         Like all endpoints, it supports OIDC access token based authentication. In addition,
         basic HTTP authentication can be used. This allows users to login and acquire an
@@ -202,7 +202,7 @@ class AuthResource(Resource):
         URLs with a ``signature_token`` query parameter, e.g. for file downloads on the
         raw or archive api endpoints; a short ``upload_token`` that is used in
         ``curl`` command line based uploads; and the OIDC JWT access token.
-        """
+        '''
 
         def signature_token():
             expires_at = datetime.datetime.utcnow() + datetime.timedelta(seconds=10)
@@ -239,7 +239,7 @@ class UsersResource(Resource):
     @api.marshal_with(users_model, code=200, description='User suggestions send')
     @api.expect(users_parser, validate=True)
     def get(self):
-        """ Get existing users. """
+        ''' Get existing users. '''
         args = users_parser.parse_args()
 
         return dict(users=infrastructure.keycloak.search_user(args.get('query')))
@@ -248,7 +248,7 @@ class UsersResource(Resource):
     @api.marshal_with(user_model, code=200, skip_none=True, description='User invited')
     @api.expect(user_model, validate=True)
     def put(self):
-        """ Invite a new user. """
+        ''' Invite a new user. '''
         if config.keycloak.oasis:
             abort(400, 'User invite does not work in this NOMAD OASIS')
 
@@ -273,10 +273,10 @@ class UsersResource(Resource):
 
 
 def with_signature_token(func):
-    """
+    '''
     A decorator for API endpoint implementations that validates signed URLs. Tokens to
     sign URLs can be retrieved via the ``/auth`` endpoint.
-    """
+    '''
     @functools.wraps(func)
     @api.response(401, 'Invalid or expired signature token')
     def wrapper(*args, **kwargs):
@@ -302,10 +302,10 @@ def with_signature_token(func):
 
 
 def create_authorization_predicate(upload_id, calc_id=None):
-    """
+    '''
     Returns a predicate that determines if the logged in user has the authorization
     to access the given upload and calculation.
-    """
+    '''
     def func():
         if g.user is None:
             # guest users don't have authorized access to anything
diff --git a/nomad/app/api/common.py b/nomad/app/api/common.py
index f9e0692969..f69f5570b6 100644
--- a/nomad/app/api/common.py
+++ b/nomad/app/api/common.py
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 Common data, variables, decorators, models used throughout the API.
-"""
+'''
 from typing import Callable, IO, Set, Tuple, Iterable, Dict, Any
 from flask_restplus import fields
 import zipstream
@@ -24,8 +24,7 @@ from urllib.parse import urlencode
 import sys
 import os.path
 
-from nomad import search, config
-from nomad.datamodel import Domain
+from nomad import search, config, datamodel
 from nomad.app.optimade import filterparser
 from nomad.app.common import RFC3339DateTime, rfc3339DateTime
 from nomad.files import Restricted
@@ -57,7 +56,7 @@ pagination_model = api.model('Pagination', {
     'order_by': fields.String(description='Sorting criterion.'),
     'order': fields.Integer(description='Sorting order -1 for descending, 1 for ascending.')
 })
-""" Model used in responses with pagination. """
+''' Model used in responses with pagination. '''
 
 scroll_model = api.model('Scroll', {
     'scroll': fields.Boolean(default=False, description='Flag if scrolling is enabled.'),
@@ -79,13 +78,13 @@ search_model_fields = {
 search_model = api.model('Search', search_model_fields)
 
 query_model_fields = {
-    quantity.qualified_name: fields.Raw(description=quantity.description)
-    for quantity in Domain.all_quantities()}
+    qualified_name: fields.Raw(description=quantity.description)
+    for qualified_name, quantity in search.search_quantities.items()}
 
 query_model_fields.update(**{
     'owner': fields.String(description='The group the calculations belong to.', allow_null=True, skip_none=True),
     'domain': fields.String(description='Specify the domain to search in: %s, default is ``%s``' % (
-        ', '.join(['``%s``' % key for key in Domain.instances.keys()]), config.default_domain)),
+        ', '.join(['``%s``' % domain for domain in datamodel.domains]), config.default_domain)),
     'from_time': fields.Raw(description='The minimum entry time.', allow_null=True, skip_none=True),
     'until_time': fields.Raw(description='The maximum entry time.', allow_null=True, skip_none=True)
 })
@@ -94,7 +93,7 @@ query_model = api.model('Query', query_model_fields)
 
 
 def add_pagination_parameters(request_parser):
-    """ Add pagination parameters to Flask querystring parser. """
+    ''' Add pagination parameters to Flask querystring parser. '''
     request_parser.add_argument(
         'page', type=int, help='The page, starting with 1.', location='args')
     request_parser.add_argument(
@@ -111,7 +110,7 @@ pagination_request_parser = request_parser.copy()
 
 
 def add_scroll_parameters(request_parser):
-    """ Add scroll parameters to Flask querystring parser. """
+    ''' Add scroll parameters to Flask querystring parser. '''
     request_parser.add_argument(
         'scroll', type=bool, help='Enable scrolling')
     request_parser.add_argument(
@@ -119,12 +118,12 @@ def add_scroll_parameters(request_parser):
 
 
 def add_search_parameters(request_parser):
-    """ Add search parameters to Flask querystring parser. """
+    ''' Add search parameters to Flask querystring parser. '''
     # more search parameters
     request_parser.add_argument(
         'domain', type=str,
         help='Specify the domain to search in: %s, default is ``%s``' % (
-            ', '.join(['``%s``' % key for key in Domain.instances.keys()]),
+            ', '.join(['``%s``' % domain for domain in datamodel.domains]),
             config.default_domain))
     request_parser.add_argument(
         'owner', type=str,
@@ -137,20 +136,18 @@ def add_search_parameters(request_parser):
         help='A yyyy-MM-ddTHH:mm:ss (RFC3339) maximum entry time (e.g. upload time)')
 
     # main search parameters
-    for quantity in Domain.all_quantities():
+    for qualified_name, quantity in search.search_quantities.items():
         request_parser.add_argument(
-            quantity.qualified_name, help=quantity.description,
-            action=quantity.argparse_action if quantity.multi else None)
+            qualified_name, help=quantity.description, action=quantity.argparse_action)
 
 
-_search_quantities = set([
-    domain.qualified_name for domain in Domain.all_quantities()])
+_search_quantities = set(search.search_quantities.keys())
 
 
 def apply_search_parameters(search_request: search.SearchRequest, args: Dict[str, Any]):
-    """
+    '''
     Helper that adds query-relevant request args to the given SearchRequest.
-    """
+    '''
     args = {key: value for key, value in args.items() if value is not None}
 
     # domain
@@ -196,7 +193,7 @@ def apply_search_parameters(search_request: search.SearchRequest, args: Dict[str
 
 
 def calc_route(ns, prefix: str = ''):
-    """ A resource decorator for /<upload>/<calc> based routes. """
+    ''' A resource decorator for /<upload>/<calc> based routes. '''
     def decorator(func):
         ns.route('%s/<string:upload_id>/<string:calc_id>' % prefix)(
             api.doc(params={
@@ -208,7 +205,7 @@ def calc_route(ns, prefix: str = ''):
 
 
 def upload_route(ns, prefix: str = ''):
-    """ A resource decorator for /<upload> based routes. """
+    ''' A resource decorator for /<upload> based routes. '''
     def decorator(func):
         ns.route('%s/<string:upload_id>' % prefix)(
             api.doc(params={
@@ -221,7 +218,7 @@ def upload_route(ns, prefix: str = ''):
 def streamed_zipfile(
         files: Iterable[Tuple[str, str, Callable[[str], IO], Callable[[str], int]]],
         zipfile_name: str, compress: bool = False):
-    """
+    '''
     Creates a response that streams the given files as a streamed zip file. Ensures that
     each given file is only streamed once, based on its filename in the resulting zipfile.
 
@@ -232,17 +229,17 @@ def streamed_zipfile(
         zipfile_name: A name that will be used in the content disposition attachment
             used in the HTTP response.
         compress: Uses compression. Default is stored only.
-    """
+    '''
 
     streamed_files: Set[str] = set()
 
     def generator():
-        """ Stream a zip file with all files using zipstream. """
+        ''' Stream a zip file with all files using zipstream. '''
         def iterator():
-            """
+            '''
             Replace the directory based iter of zipstream with an iter over all given
             files.
-            """
+            '''
             # the actual contents
             for zipped_filename, file_id, open_io, file_size in files:
                 if zipped_filename in streamed_files:
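The ``iterator`` above swaps zipstream's directory walk for the explicit file list. A self-contained sketch of the underlying pattern, assuming the ``zipstream`` package imported by this module:

    import zipstream

    zf = zipstream.ZipFile(mode='w', compression=zipstream.ZIP_STORED)
    # stream content from an iterable instead of a file on disk
    zf.write_iter('manifest.json', iter([b'{}']))
    for chunk in zf:
        pass  # each chunk is a piece of the zip byte stream, e.g. fed to the HTTP response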
@@ -286,12 +283,12 @@ def streamed_zipfile(
 
 
 def query_api_url(*args, query_string: Dict[str, Any] = None):
-    """
+    '''
     Creates an API URL.
     Arguments:
         *args: URL path segments after the API base URL
         query_string: A dict with query string parameters
-    """
+    '''
     url = os.path.join(config.api_url(False), *args)
     if query_string is not None:
         url = '%s?%s' % (url, urlencode(query_string, doseq=True))
@@ -300,10 +297,10 @@ def query_api_url(*args, query_string: Dict[str, Any] = None):
 
 
 def query_api_python(*args, **kwargs):
-    """
+    '''
     Creates a string of python code to execute a search query to the repository using
     the requests library.
-    """
+    '''
     url = query_api_url(*args, **kwargs)
     return '''import requests
 response = requests.post("{}")
@@ -311,8 +308,8 @@ data = response.json()'''.format(url)
 
 
 def query_api_curl(*args, **kwargs):
-    """
+    '''
     Creates a string of curl command to execute a search query to the repository.
-    """
+    '''
     url = query_api_url(*args, **kwargs)
     return 'curl -X POST %s -H  "accept: application/json" --output "nomad.json"' % url
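For a concrete query, the two helpers above render snippets like these; the base URL is a hypothetical example, the templates are taken from the code:

    print(query_api_python('repo', query_string={'atoms': 'Si'}))
    # import requests
    # response = requests.post("http://localhost/api/repo?atoms=Si")
    # data = response.json()

    print(query_api_curl('repo', query_string={'atoms': 'Si'}))
    # curl -X POST http://localhost/api/repo?atoms=Si -H  "accept: application/json" --output "nomad.json"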
diff --git a/nomad/app/api/dataset.py b/nomad/app/api/dataset.py
index 7ff4d87567..5adb190e15 100644
--- a/nomad/app/api/dataset.py
+++ b/nomad/app/api/dataset.py
@@ -49,7 +49,7 @@ class DatasetListResource(Resource):
     @api.expect(list_datasets_parser)
     @authenticate(required=True)
     def get(self):
-        """ Retrieve a list of all datasets of the authenticated user. """
+        ''' Retrieve a list of all datasets of the authenticated user. '''
         args = {
             key: value for key, value in list_datasets_parser.parse_args().items()
             if value is not None}
@@ -76,7 +76,7 @@ class DatasetListResource(Resource):
     @api.expect(dataset_model)
     @authenticate(required=True)
     def put(self):
-        """ Creates a new dataset. """
+        ''' Creates a new dataset. '''
         data = request.get_json()
         if data is None:
             data = {}
@@ -112,7 +112,7 @@ class DatasetResource(Resource):
     @api.marshal_with(dataset_model, skip_none=True, code=200, description='Dataset send')
     @authenticate(required=True)
     def get(self, name: str):
-        """ Retrieve a dataset by name. """
+        ''' Retrieve a dataset by name. '''
         try:
             result = Dataset.m_def.m_x('me').get(user_id=g.user.user_id, name=name)
         except KeyError:
@@ -126,7 +126,7 @@ class DatasetResource(Resource):
     @api.marshal_with(dataset_model, skip_none=True, code=200, description='DOI assigned')
     @authenticate(required=True)
     def post(self, name: str):
-        """ Assign a DOI to the dataset. """
+        ''' Assign a DOI to the dataset. '''
         try:
             result = Dataset.m_def.m_x('me').get(user_id=g.user.user_id, name=name)
         except KeyError:
@@ -168,7 +168,7 @@ class DatasetResource(Resource):
     @api.marshal_with(dataset_model, skip_none=True, code=200, description='Dataset deleted')
     @authenticate(required=True)
     def delete(self, name: str):
-        """ Delete the dataset. """
+        ''' Delete the dataset. '''
         try:
             result = Dataset.m_def.m_x('me').get(user_id=g.user.user_id, name=name)
         except KeyError:
diff --git a/nomad/app/api/info.py b/nomad/app/api/info.py
index e6e87e4378..0630498d38 100644
--- a/nomad/app/api/info.py
+++ b/nomad/app/api/info.py
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 API endpoint that delivers backend configuration details.
-"""
+'''
 
 from flask_restplus import Resource, fields
 
@@ -69,7 +69,7 @@ class InfoResource(Resource):
     @api.doc('get_info')
     @api.marshal_with(info_model, skip_none=True, code=200, description='Info send')
     def get(self):
-        """ Return information about the nomad backend and its configuration. """
+        ''' Return information about the nomad backend and its configuration. '''
         codes = [
             parser.code_name
             for parser in parsing.parser_dict.values()
@@ -83,16 +83,13 @@ class InfoResource(Resource):
             'normalizers': [normalizer.__name__ for normalizer in normalizing.normalizers],
             'domains': [
                 {
-                    'name': domain.name,
-                    'quantities': [quantity for quantity in domain.quantities.values()],
-                    'metrics_names': domain.metrics_names,
-                    'aggregations_names': domain.aggregations_names,
+                    'name': domain_name,
                     'metainfo': {
-                        'all_package': domain.metainfo_all_package,
-                        'root_sections': domain.root_sections
+                        'all_package': domain['metainfo_all_package'],
+                        'root_section': domain['root_section']
                     }
                 }
-                for domain in datamodel.Domain.instances.values()
+                for domain_name, domain in datamodel.domains.items()
             ],
             'version': config.version,
             'release': config.release,
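The rewritten loop assumes ``datamodel.domains`` maps each domain name to a plain dict. A hypothetical entry for illustration; the keys are taken from the code above, the values are invented:

    domains = {
        'dft': {
            'metainfo_all_package': 'all.nomadmetainfo.json',  # invented value
            'root_section': 'section_run',                     # invented value
        },
    }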
diff --git a/nomad/app/api/mirror.py b/nomad/app/api/mirror.py
index 9353600c16..e0794d0da9 100644
--- a/nomad/app/api/mirror.py
+++ b/nomad/app/api/mirror.py
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 The mirror API of the nomad@FAIRDI APIs. Allows exporting upload metadata.
-"""
+'''
 
 from flask import request
 from flask_restplus import Resource, abort, fields
@@ -82,9 +82,9 @@ class MirrorUploadResource(Resource):
     @api.doc('get_upload_mirror')
     @authenticate(admin_only=True)
     def get(self, upload_id):
-        """
+        '''
         Export upload (and all calc) metadata for mirrors.
-        """
+        '''
         try:
             upload = proc.Upload.get(upload_id)
         except KeyError:
diff --git a/nomad/app/api/raw.py b/nomad/app/api/raw.py
index b08abd4b05..b529c7b439 100644
--- a/nomad/app/api/raw.py
+++ b/nomad/app/api/raw.py
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 The raw API of the nomad@FAIRDI APIs. Can be used to retrieve raw calculation files.
-"""
+'''
 
 from typing import IO, Any, Union, List
 import os.path
@@ -71,13 +71,13 @@ _raw_file_from_path_parser.add_argument(
 
 
 class FileView:
-    """
+    '''
     File-like wrapper that restricts the contents to a portion of the file.
     Arguments:
         f: the file-like
         offset: the offset
         length: the amount of bytes
-    """
+    '''
     def __init__(self, f, offset, length):
         self.f = f
         self.f_offset = offset
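A short usage sketch for ``FileView``; the file name and byte range are hypothetical, and it assumes the wrapper exposes the usual ``read()`` interface:

    with open('upload.zip', 'rb') as f:              # hypothetical file
        view = FileView(f, offset=1024, length=512)
        data = view.read()                           # at most 512 bytes, starting at byte 1024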
@@ -110,10 +110,10 @@ class FileView:
 
 def get_raw_file_from_upload_path(
         upload_files, upload_filepath, authorization_predicate, mainfile: str = None):
-    """
+    '''
     Helper method used by :func:`RawFileFromUploadPathResource.get` and
     :func:`RawFileFromCalcPathResource.get`.
-    """
+    '''
     upload_filepath = upload_filepath.rstrip('/')
 
     if upload_filepath[-1:] == '*':
@@ -197,7 +197,7 @@ class RawFileFromUploadPathResource(Resource):
     @api.expect(_raw_file_from_path_parser, validate=True)
     @authenticate(signature_token=True)
     def get(self, upload_id: str, path: str):
-        """ Get a single raw calculation file, directory contents, or whole directory sub-tree
+        ''' Get a single raw calculation file, directory contents, or whole directory sub-tree
         from a given upload.
 
         The 'upload_id' parameter needs to identify an existing upload.
@@ -223,7 +223,7 @@ class RawFileFromUploadPathResource(Resource):
         match the given path at the start, will be returned as a .zip file body.
         Zip files are streamed; instead of 401 errors, the zip file will just not contain
         any files that the user is not authorized to access.
-        """
+        '''
         # TODO this is a quick fix, since swagger cannot deal with unencoded path parameters
         if path is not None:
             path = urllib.parse.unquote(path)
@@ -258,7 +258,7 @@ class RawFileFromCalcPathResource(Resource):
     @api.expect(_raw_file_from_path_parser, validate=True)
     @authenticate(signature_token=True)
     def get(self, upload_id: str, calc_id: str, path: str):
-        """ Get a single raw calculation file, calculation contents, or all files for a
+        ''' Get a single raw calculation file, calculation contents, or all files for a
         given calculation.
 
         The 'upload_id' parameter needs to identify an existing upload.
@@ -266,7 +266,7 @@ class RawFileFromCalcPathResource(Resource):
 
         This endpoint behaves exactly like /raw/<upload_id>/<path>, but the path is
         now relative to the calculation and not the upload.
-        """
+        '''
         # TODO this is a quick fix, since swagger cannot deal with unencoded path parameters
         if path is not None:
             path = urllib.parse.unquote(path)
@@ -300,11 +300,11 @@ class RawFileFromCalcEmptyPathResource(RawFileFromCalcPathResource):
     @api.expect(_raw_file_from_path_parser, validate=True)
     @authenticate(signature_token=True)
     def get(self, upload_id: str, calc_id: str):
-        """ Get calculation contents.
+        ''' Get calculation contents.
 
         This is basically /raw/calc/<upload_id>/<calc_id>/<path> with an empty path, since
         having an empty path parameter is not possible.
-        """
+        '''
         return super().get(upload_id, calc_id, None)
 
 
@@ -336,11 +336,11 @@ class RawFilesResource(Resource):
     @api.expect(_raw_files_request_model, validate=True)
     @authenticate()
     def post(self, upload_id):
-        """ Download multiple raw calculation files in a .zip file.
+        ''' Download multiple raw calculation files in a .zip file.
 
         Zip files are streamed; instead of 401 errors, the zip file will just not contain
         any files that the user is not authorized to access.
-        """
+        '''
         json_data = request.get_json()
         compress = json_data.get('compress', False)
         files = [file.strip() for file in json_data['files']]
@@ -353,12 +353,12 @@ class RawFilesResource(Resource):
     @api.expect(_raw_files_request_parser, validate=True)
     @authenticate(signature_token=True)
     def get(self, upload_id):
-        """
+        '''
         Download multiple raw calculation files in a .zip file.
         Zip files are streamed; instead of 401 errors, the zip file will just not contain
         any files that the user is not authorized to access.
-        """
+        '''
         args = _raw_files_request_parser.parse_args()
 
         files_str = args.get('files')
@@ -401,7 +401,7 @@ class RawFileQueryResource(Resource):
     @api.response(200, 'File(s) send', headers={'Content-Type': 'application/zip'})
     @authenticate(signature_token=True)
     def get(self):
-        """ Download a .zip file with all raw-files for all entries that match the given
+        ''' Download a .zip file with all raw-files for all entries that match the given
         search parameters.
 
         See ``/repo`` endpoint for documentation on the search
@@ -411,7 +411,7 @@ class RawFileQueryResource(Resource):
         any files that the user is not authorized to access.
 
         The zip file will contain a ``manifest.json`` with the repository meta data.
-        """
+        '''
         logger = common.logger.bind(query=urllib.parse.urlencode(request.args, doseq=True))
 
         patterns: List[str] = None
diff --git a/nomad/app/api/repo.py b/nomad/app/api/repo.py
index 6864a2909e..dfd783519b 100644
--- a/nomad/app/api/repo.py
+++ b/nomad/app/api/repo.py
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 The repository API of the nomad@FAIRDI APIs. Currently allows resolving repository
 meta-data.
-"""
+'''
 
 from typing import List, Dict, Any
 from flask_restplus import Resource, abort, fields
@@ -26,7 +26,7 @@ import elasticsearch.helpers
 from datetime import datetime
 
 from nomad import search, utils, datamodel, processing as proc, infrastructure
-from nomad.datamodel import UserMetadata, Dataset, User, Domain
+from nomad.datamodel import Dataset, User, EditableUserMetadata
 from nomad.app import common
 from nomad.app.common import RFC3339DateTime, DotKeyNested
 
@@ -47,12 +47,12 @@ class RepoCalcResource(Resource):
     @api.doc('get_repo_calc')
     @authenticate()
     def get(self, upload_id, calc_id):
-        """
+        '''
         Get calculation metadata in repository form.
 
         Repository metadata only entails the quantities shown in the repository.
         Calcs are references via *upload_id*, *calc_id* pairs.
-        """
+        '''
         try:
             calc = search.Entry.get(calc_id)
         except NotFoundError:
@@ -88,7 +88,7 @@ _search_request_parser.add_argument(
     'exclude', type=str, action='split', help='Excludes the given keys in the returned data.')
 for group_name in search.groups:
     _search_request_parser.add_argument(
-        group_name, type=bool, help=('Return %s group data.' % group_name))
+        'group_%s' % group_name, type=bool, help=('Return %s group data.' % group_name))
     _search_request_parser.add_argument(
         '%s_after' % group_name, type=str,
         help='The last %s id of the last scroll window for the %s group' % (group_name, group_name))
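With the ``group_`` prefix introduced above, a client toggles a group aggregation and pages through it roughly like this (base URL and after-value are hypothetical):

    import requests  # hypothetical client-side usage

    requests.get(
        'http://localhost/api/repo/',  # assumed URL
        params={'group_uploads': True, 'uploads_after': 'last_upload_id'})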
@@ -100,14 +100,14 @@ _repo_calcs_model_fields = {
         'There is a pseudo quantity "total" with a single value "all" that contains the '
         ' metrics over all results. ' % ', '.join(search.metrics_names)))}
 
-for group_name, (group_quantity, _) in search.groups.items():
+for group_name in search.groups:
     _repo_calcs_model_fields[group_name] = (DotKeyNested if '.' in group_name else fields.Nested)(api.model('RepoGroup', {
         'after': fields.String(description='The after value that can be used to retrieve the next %s.' % group_name),
-        'values': fields.Raw(description='A dict with %s as key. The values are dicts with "total" and "examples" keys.' % group_quantity)
+        'values': fields.Raw(description='A dict with %s as key. The values are dicts with "total" and "examples" keys.' % group_name)
     }), skip_none=True)
 
-for quantity in Domain.all_quantities():
-    _repo_calcs_model_fields[quantity.name] = fields.Raw(
+for qualified_name, quantity in search.search_quantities.items():
+    _repo_calcs_model_fields[qualified_name] = fields.Raw(
         description=quantity.description, allow_null=True, skip_none=True)
 
 _repo_calcs_model = api.inherit('RepoCalculations', search_model, _repo_calcs_model_fields)
@@ -121,7 +121,7 @@ class RepoCalcsResource(Resource):
     @api.marshal_with(_repo_calcs_model, skip_none=True, code=200, description='Search results send')
     @authenticate()
     def get(self):
-        """
+        '''
         Search for calculations in the repository form, paginated.
 
         The ``owner`` parameter determines the overall entries to search through.
@@ -151,7 +151,7 @@ class RepoCalcsResource(Resource):
 
         Ordering is determined by ``order_by`` and ``order`` parameters. Default is
         ``upload_time`` in descending order.
-        """
+        '''
 
         try:
             parsed_args = _search_request_parser.parse_args()
@@ -170,7 +170,7 @@ class RepoCalcsResource(Resource):
             metrics: List[str] = request.args.getlist('metrics')
 
             with_statistics = args.get('statistics', False) or \
-                any(args.get(group_name, False) for group_name in search.groups)
+                any(args.get('group_%s' % group_name, False) for group_name in search.groups)
         except Exception as e:
             abort(400, message='bad parameters: %s' % str(e))
 
@@ -196,9 +196,9 @@ class RepoCalcsResource(Resource):
             search_request.default_statistics(metrics_to_use=metrics)
 
             additional_metrics = [
-                metric
-                for group_name, (_, metric) in search.groups.items()
-                if args.get(group_name, False)]
+                group_quantity.metric_name
+                for group_name, group_quantity in search.groups.items()
+                if args.get('group_%s' % group_name, False)]
 
             total_metrics = metrics + additional_metrics
 
@@ -217,13 +217,13 @@ class RepoCalcsResource(Resource):
                 results = search_request.execute_scrolled(scroll_id=scroll_id, size=per_page)
 
             else:
-                for group_name, (group_quantity, _) in search.groups.items():
-                    if args.get(group_name, False):
+                for group_name, group_quantity in search.groups.items():
+                    if args.get('group_%s' % group_name, False):
                         kwargs: Dict[str, Any] = {}
-                        if group_name == 'uploads':
+                        if group_name == 'uploads':
                             kwargs.update(order_by='upload_time', order='desc')
                         search_request.quantity(
-                            group_quantity, size=per_page, examples=1,
+                            group_quantity.qualified_name, size=per_page, examples=1,
                             after=request.args.get('%s_after' % group_name, None),
                             **kwargs)
 
@@ -239,9 +239,9 @@ class RepoCalcsResource(Resource):
                 if 'quantities' in results:
                     quantities = results.pop('quantities')
 
-                for group_name, (group_quantity, _) in search.groups.items():
-                    if args.get(group_name, False):
-                        results[group_name] = quantities[group_quantity]
+                for group_name, group_quantity in search.groups.items():
+                    if args.get('group_%s' % group_name, False):
+                        results[group_name] = quantities[group_quantity.qualified_name]
 
             # build python code/curl snippet
             code_args = dict(request.args)
@@ -265,13 +265,13 @@ _query_model_parameters = {
     'until_time': RFC3339DateTime(description='A yyyy-MM-ddTHH:mm:ss (RFC3339) maximum entry time (e.g. upload time)')
 }
 
-for quantity in datamodel.Domain.all_quantities():
-    if quantity.multi and quantity.argparse_action is None:
+for qualified_name, quantity in search.search_quantities.items():
+    if quantity.many_and:
         def field(**kwargs):
             return fields.List(fields.String(**kwargs))
     else:
         field = fields.String
-    _query_model_parameters[quantity.name] = field(description=quantity.description)
+    _query_model_parameters[qualified_name] = field(description=quantity.description)
 
 _repo_query_model = api.model('RepoQuery', _query_model_parameters, skip_none=True)
 
@@ -296,13 +296,16 @@ _repo_edit_model = api.model('RepoEdit', {
     'actions': fields.Nested(
         api.model('RepoEditActions', {
             quantity.name: repo_edit_action_field(quantity)
-            for quantity in UserMetadata.m_def.all_quantities.values()
+            for quantity in EditableUserMetadata.m_def.definitions
         }), skip_none=True,
         description='Each action specifies a single value (even for multi valued quantities).'),
     'success': fields.Boolean(description='If the overall edit can/could be done. Only in API response.'),
     'message': fields.String(description='A message that details the overall edit result. Only in API response.')
 })
 
+_editable_quantities = {
+    quantity.name: quantity for quantity in EditableUserMetadata.m_def.definitions}
+
 
 def edit(parsed_query: Dict[str, Any], mongo_update: Dict[str, Any] = None, re_index=True) -> List[str]:
     # get all calculations that have to change
@@ -327,8 +330,8 @@ def edit(parsed_query: Dict[str, Any], mongo_update: Dict[str, Any] = None, re_i
         if re_index:
             def elastic_updates():
                 for calc in proc.Calc.objects(calc_id__in=calc_ids):
-                    entry = search.Entry.from_calc_with_metadata(
-                        datamodel.CalcWithMetadata(**calc['metadata']))
+                    entry = search.create_entry(
+                        datamodel.EntryMetadata.m_from_dict(calc['metadata']))
                     entry = entry.to_dict(include_meta=True)
                     entry['_op_type'] = 'index'
                     yield entry
@@ -345,7 +348,7 @@ def edit(parsed_query: Dict[str, Any], mongo_update: Dict[str, Any] = None, re_i
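The generator yields plain action dicts with an ``_op_type`` key, the format consumed by the elasticsearch bulk helpers (``elasticsearch.helpers`` is already imported by this module). A minimal sketch with an invented document:

    import elasticsearch
    import elasticsearch.helpers

    es_client = elasticsearch.Elasticsearch()  # hypothetical connection
    actions = [{'_op_type': 'index', '_index': 'calcs', 'calc_id': 'test'}]  # invented doc
    elasticsearch.helpers.bulk(es_client, actions)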
 
 
 def get_uploader_ids(query):
+    ''' Get all the uploaders from the query, to check coauthors and shared_with for uploaders. '''
+    ''' Get all the uploader from the query, to check coauthers and shared_with for uploaders. '''
     search_request = search.SearchRequest()
     apply_search_parameters(search_request, query)
     search_request.quantity(name='uploader_id')
@@ -360,7 +363,7 @@ class EditRepoCalcsResource(Resource):
     @api.marshal_with(_repo_edit_model, skip_none=True, code=200, description='Edit verified/performed')
     @authenticate()
     def post(self):
-        """ Edit repository metadata. """
+        ''' Edit repository metadata. '''
 
         # basic body parsing and some semantic checks
         json_data = request.get_json()
@@ -382,9 +385,10 @@ class EditRepoCalcsResource(Resource):
         parsed_query = {}
         for quantity_name, value in query.items():
             if quantity_name in _search_quantities:
-                quantity = datamodel.Domain.get_quantity(quantity_name)
-                if quantity.multi and quantity.argparse_action == 'split' and not isinstance(value, list):
-                    value = value.split(',')
+                quantity = search.search_quantities[quantity_name]
+                if quantity.many:
+                    if not isinstance(value, list):
+                        value = value.split(',')
                 parsed_query[quantity_name] = value
         parsed_query['owner'] = owner
         parsed_query['domain'] = query.get('domain')
@@ -398,7 +402,7 @@ class EditRepoCalcsResource(Resource):
 
         with utils.timer(common.logger, 'edit verified'):
             for action_quantity_name, quantity_actions in actions.items():
-                quantity = UserMetadata.m_def.all_quantities.get(action_quantity_name)
+                quantity = _editable_quantities.get(action_quantity_name)
                 if quantity is None:
                     abort(400, 'Unknown quantity %s' % action_quantity_name)
 
@@ -564,7 +568,7 @@ class RepoQuantityResource(Resource):
     @api.marshal_with(_repo_quantity_values_model, skip_none=True, code=200, description='Search results send')
     @authenticate()
     def get(self, quantity: str):
-        """
+        '''
         Retrieve quantity values from entries matching the search.
 
         You can use the various quantities to search/filter for. For some of the
@@ -580,7 +584,7 @@ class RepoQuantityResource(Resource):
         The result will contain a 'quantity' key with quantity values and the "after"
         value. There will be up to 'size' many values. For the rest of the values use the
         "after" parameter in another request.
-        """
+        '''
 
         search_request = search.SearchRequest()
         args = {
@@ -631,7 +635,7 @@ class RepoQuantitiesResource(Resource):
     @api.marshal_with(_repo_quantities_model, skip_none=True, code=200, description='Search results send')
     @authenticate()
     def get(self):
-        """
+        '''
         Retrieve quantity values for multiple quantities at once.
 
         You can use the various quantities to search/filter for. For some of the
@@ -645,7 +649,7 @@ class RepoQuantitiesResource(Resource):
 
         The result will contain a 'quantities' key with a dict of quantity names and the
         retrieved values as values.
-        """
+        '''
 
         search_request = search.SearchRequest()
         args = {
diff --git a/nomad/app/api/upload.py b/nomad/app/api/upload.py
index 8655a0e8db..cb04f51589 100644
--- a/nomad/app/api/upload.py
+++ b/nomad/app/api/upload.py
@@ -12,11 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 The upload API of the nomad@FAIRDI APIs. Provides endpoints to upload files and
 get the processing status of uploads.
-"""
+'''
 
+from typing import Dict, Any
 from flask import g, request, Response
 from flask_restplus import Resource, fields, abort
 from datetime import datetime
@@ -44,8 +45,8 @@ ns = api.namespace(
 
 class CalcMetadata(fields.Raw):
     def format(self, value):
-        calc_with_metadata = datamodel.CalcWithMetadata(**value)
-        return search.Entry.from_calc_with_metadata(calc_with_metadata).to_dict()
+        entry_metadata = datamodel.EntryMetadata.m_from_dict(value)
+        return search.create_entry(entry_metadata).to_dict()
 
 
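The ``m_from_dict``/``create_entry`` pair replaces the old keyword-argument construction. A minimal round-trip sketch; only ``calc_id`` is assumed here, and ``m_to_dict`` is assumed to be the metainfo counterpart of ``m_from_dict``:

    from nomad import datamodel

    entry_metadata = datamodel.EntryMetadata.m_from_dict({'calc_id': 'test'})
    assert entry_metadata.m_to_dict()['calc_id'] == 'test'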
 proc_model = api.model('Processing', {
@@ -141,10 +142,10 @@ def disable_marshalling(f):
 
 
 def marshal_with(*args, **kwargs):
-    """
+    '''
     A special version of the RESTPlus marshal_with decorator that allows disabling
     marshalling at runtime by raising DisableMarshalling.
-    """
+    '''
     def decorator(func):
         @api.marshal_with(*args, **kwargs)
         def with_marshalling(*args, **kwargs):
@@ -175,7 +176,7 @@ class UploadListResource(Resource):
     @api.expect(upload_list_parser)
     @authenticate(required=True)
     def get(self):
-        """ Get the list of all uploads from the authenticated user. """
+        ''' Get the list of all uploads from the authenticated user. '''
         try:
             state = request.args.get('state', 'unpublished')
             name = request.args.get('name', None)
@@ -220,7 +221,7 @@ class UploadListResource(Resource):
     @marshal_with(upload_model, skip_none=True, code=200, description='Upload received')
     @authenticate(required=True, upload_token=True)
     def put(self):
-        """
+        '''
         Upload a file and automatically create a new upload in the process.
         Can be used to upload files via browser or other http clients like curl.
         This will also start the processing of the upload.
@@ -237,7 +238,7 @@ class UploadListResource(Resource):
 
         There is a general limit on how many unpublished uploads a user can have. Will
         return 400 if this limit is exceeded.
-        """
+        '''
         # check existence of local_path if local_path is used
         local_path = request.args.get('local_path')
         if local_path:
@@ -345,12 +346,12 @@ class UploadResource(Resource):
     @api.expect(pagination_request_parser)
     @authenticate(required=True)
     def get(self, upload_id: str):
-        """
+        '''
         Get an update for an existing upload.
 
         Will not only return the upload, but also its calculations paginated.
         Use the pagination params to determine the page.
-        """
+        '''
         try:
             upload = Upload.get(upload_id)
         except KeyError:
@@ -398,12 +399,12 @@ class UploadResource(Resource):
     @api.marshal_with(upload_model, skip_none=True, code=200, description='Upload deleted')
     @authenticate(required=True)
     def delete(self, upload_id: str):
-        """
+        '''
         Delete an existing upload.
 
         Only uploads that are still in staging, not already deleted, not still uploaded, and
         not currently processed, can be deleted.
-        """
+        '''
         try:
             upload = Upload.get(upload_id)
         except KeyError:
@@ -436,7 +437,7 @@ class UploadResource(Resource):
     @api.expect(upload_operation_model)
     @authenticate(required=True)
     def post(self, upload_id):
-        """
+        '''
         Execute an upload operation. Available operations are ``publish`` and ``re-process``.
 
         Publish accepts further metadata that allows providing coauthors, comments,
@@ -449,7 +450,7 @@ class UploadResource(Resource):
         Re-process will re-process the upload and produce updated repository metadata and
         archive. Only published uploads that are not processing at the moment are allowed.
         Only for uploads where calculations have been processed with an older nomad version.
-        """
+        '''
         try:
             upload = Upload.get(upload_id)
         except KeyError:
@@ -464,12 +465,18 @@ class UploadResource(Resource):
 
         operation = json_data.get('operation')
 
-        metadata = json_data.get('metadata', {})
-        for key in metadata:
-            if key.startswith('_'):
+        user_metadata: Dict[str, Any] = json_data.get('metadata', {})
+        metadata: Dict[str, Any] = {}
+        for user_key in user_metadata:
+            if user_key.startswith('_'):
                 if not g.user.is_admin:
                     abort(401, message='Only admin users can use _metadata_keys.')
-                break
+
+                key = user_key[1:]
+            else:
+                key = user_key
+
+            metadata[key] = user_metadata[user_key]
 
         if operation == 'publish':
             if upload.tasks_running:
@@ -519,7 +526,7 @@ class UploadCommandResource(Resource):
     @api.marshal_with(upload_command_model, code=200, description='Upload command send')
     @authenticate(required=True)
     def get(self):
-        """ Get url and example command for shell based uploads. """
+        ''' Get url and example command for shell based uploads. '''
         token = generate_upload_token(g.user)
         upload_url = '%s/uploads/?token=%s' % (config.api_url(ssl=False), token)
         upload_url_with_name = upload_url + '&name=<name>'
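The rewritten loop above no longer just detects the admin-only underscore prefix, it strips it while copying the values. A sketch of the effective transformation (keys invented, admin check omitted):

    user_metadata = {'_upload_time': '2020-03-01T00:00:00', 'comment': 'test'}  # invented
    metadata = {
        (key[1:] if key.startswith('_') else key): value
        for key, value in user_metadata.items()}
    assert metadata == {'upload_time': '2020-03-01T00:00:00', 'comment': 'test'}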
diff --git a/nomad/app/common.py b/nomad/app/common.py
index 3ca5c29995..b4c5323b86 100644
--- a/nomad/app/common.py
+++ b/nomad/app/common.py
@@ -22,10 +22,10 @@ from nomad import config
 
 
 logger: BoundLogger = None
-""" A logger pre configured with information about the current request. """
+''' A logger pre configured with information about the current request. '''
 
 base_path = config.services.api_base_path
-""" Provides the root path of the nomad APIs. """
+''' Provides the root path of the nomad APIs. '''
 
 
 class RFC3339DateTime(fields.DateTime):
@@ -41,7 +41,7 @@ rfc3339DateTime = RFC3339DateTime()
 
 
 class DotKeyFieldMixin:
-    """ Allows use of flask_restplus fields with '.' in key names. By default, '.'
+    ''' Allows use of flask_restplus fields with '.' in key names. By default, '.'
     is used as a separator for accessing nested properties. Mixin prevents this,
     allowing fields to use '.' in the key names.
 
@@ -53,7 +53,7 @@ class DotKeyFieldMixin:
 
     flask_restplus tries to fetch values for data['my']['dot']['field'] instead
     of data['my.dot.field'], which is the desired behaviour in this case.
-    """
+    '''
 
     def output(self, key, obj, **kwargs):
         transformed_obj = {k.replace(".", "___"): v for k, v in obj.items()}
@@ -67,10 +67,10 @@ class DotKeyFieldMixin:
 
     @contextmanager
     def toggle_attribute(self):
-        """ Context manager to temporarily set self.attribute to None
+        ''' Context manager to temporarily set self.attribute to None
 
         Yields self.attribute before setting to None
-        """
+        '''
         attribute = self.attribute
         self.attribute = None
         yield attribute
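
To illustrate the substitution the mixin performs (values made up): '.' is
rewritten before flask_restplus resolves nested attributes and mapped back on
output.

    obj = {'my.dot.field': 42}
    # replace '.' so flask_restplus does not treat it as a nesting separator ...
    transformed = {k.replace('.', '___'): v for k, v in obj.items()}
    assert transformed == {'my___dot___field': 42}
    # ... and restore it when producing the output key
    assert 'my___dot___field'.replace('___', '.') == 'my.dot.field'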
diff --git a/nomad/app/optimade/__init__.py b/nomad/app/optimade/__init__.py
index b2573598d8..913892a96a 100644
--- a/nomad/app/optimade/__init__.py
+++ b/nomad/app/optimade/__init__.py
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 The optimade implementation of NOMAD.
-"""
+'''
 
 from flask import Blueprint
 from flask_restplus import Api
diff --git a/nomad/app/optimade/api.py b/nomad/app/optimade/api.py
index 6a9057b472..c974d712e2 100644
--- a/nomad/app/optimade/api.py
+++ b/nomad/app/optimade/api.py
@@ -26,7 +26,7 @@ base_url = 'http://%s/%s/optimade' % (
 
 
 def url(endpoint: str = None, **kwargs):
-    """ Returns the full optimade api url (for a given endpoint) including query parameters. """
+    ''' Returns the full optimade api url (for a given endpoint) including query parameters. '''
     if endpoint is None:
         url = base_url
     else:
@@ -43,7 +43,7 @@ api = Api(
     version='1.0', title='NOMAD\'s OPTiMaDe API implementation',
     description='NOMAD\'s OPTiMaDe API implementation, version 0.10.0.',
     validate=True)
-""" Provides the flask restplust api instance for the optimade api"""
+''' Provides the flask restplus api instance for the optimade api. '''
 
 
 # For some unknown reason it is necessary for each fr api to have a handler.
diff --git a/nomad/app/optimade/endpoints.py b/nomad/app/optimade/endpoints.py
index 9efb314e01..5f41a8f292 100644
--- a/nomad/app/optimade/endpoints.py
+++ b/nomad/app/optimade/endpoints.py
@@ -41,9 +41,9 @@ def base_request_args():
 
 
 def base_search_request():
-    """ Creates a search request for all public and optimade enabled data. """
+    ''' Creates a search request for all public and optimade enabled data. '''
     return search.SearchRequest().owner('all', None).query(
-        Q('exists', field='dft.optimade.nelements'))  # TODO use the elastic annotations when done
+        Q('exists', field='dft.optimade.elements'))  # TODO use the elastic annotations when done
 
 
 @ns.route('/calculations')
@@ -53,7 +53,7 @@ class CalculationList(Resource):
     @api.expect(entry_listing_endpoint_parser, validate=True)
     @api.marshal_with(json_api_list_response_model, skip_none=True, code=200)
     def get(self):
-        """ Retrieve a list of calculations that match the given Optimade filter expression. """
+        ''' Retrieve a list of calculations that match the given Optimade filter expression. '''
         request_fields = base_request_args()
 
         try:
@@ -106,7 +106,7 @@ class Calculation(Resource):
     @api.expect(single_entry_endpoint_parser, validate=True)
     @api.marshal_with(json_api_single_response_model, skip_none=True, code=200)
     def get(self, id: str):
-        """ Retrieve a single calculation for the given id. """
+        ''' Retrieve a single calculation for the given id. '''
         request_fields = base_request_args()
         search_request = base_search_request().search_parameters(calc_id=id)
 
@@ -134,7 +134,7 @@ class CalculationInfo(Resource):
     @api.expect(base_endpoint_parser, validate=True)
     @api.marshal_with(json_api_info_response_model, skip_none=True, code=200)
     def get(self):
-        """ Returns information relating to the API implementation- """
+        ''' Returns information relating to the API implementation. '''
         base_request_args()
 
         result = {
@@ -160,7 +160,7 @@ class Info(Resource):
     @api.expect(base_endpoint_parser, validate=True)
     @api.marshal_with(json_api_single_response_model, skip_none=True, code=200)
     def get(self):
-        """ Returns information relating to the API implementation- """
+        ''' Returns information relating to the API implementation. '''
         base_request_args()
 
         result = {
diff --git a/nomad/app/optimade/filterparser.py b/nomad/app/optimade/filterparser.py
index c238709292..a95e78100a 100644
--- a/nomad/app/optimade/filterparser.py
+++ b/nomad/app/optimade/filterparser.py
@@ -20,17 +20,17 @@ from nomad.metainfo.optimade import OptimadeEntry
 
 
 class FilterException(Exception):
-    """ Raised on parsing a filter expression with syntactic of semantic errors. """
+    ''' Raised on parsing a filter expression with syntactic or semantic errors. '''
     pass
 
 
 quantities: Dict[str, Quantity] = {
     q.name: Quantity(
         q.name, es_field='dft.optimade.%s' % q.name,
-        elastic_mapping_type=q.m_annotations['elastic']['type'])
+        elastic_mapping_type=q.m_x('search').es_mapping.__class__)
 
     for q in OptimadeEntry.m_def.all_quantities.values()
-    if 'elastic' in q.m_annotations}
+    if 'search' in q.m_annotations}
 
 quantities['elements'].length_quantity = quantities['nelements']
 quantities['dimension_types'].length_quantity = quantities['dimension_types']
@@ -43,7 +43,7 @@ _transformer = Transformer(quantities=quantities.values())
 
 
 def parse_filter(filter_str: str) -> Q:
-    """ Parses the given optimade filter str and returns a suitable elastic search query.
+    ''' Parses the given optimade filter str and returns a suitable elastic search query.
 
     Arguments:
         filter_str: Can be direct user input with no prior processing.
@@ -51,7 +51,7 @@ def parse_filter(filter_str: str) -> Q:
     Raises:
         FilterException: If the given str cannot be parsed, or if there are any semantic
             errors in the given expression.
-    """
+    '''
 
     try:
         parse_tree = _parser.parse(filter_str)
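
A hedged usage sketch for ``parse_filter``; the filter string is illustrative
OPTiMaDe syntax and the result is an elasticsearch_dsl ``Q`` object:

    from nomad.app.optimade.filterparser import FilterException, parse_filter

    try:
        q = parse_filter('elements HAS "Si" AND nelements = 2')
    except FilterException as e:
        print('cannot parse filter: %s' % e)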
diff --git a/nomad/app/optimade/models.py b/nomad/app/optimade/models.py
index ac0643b283..9f423a1712 100644
--- a/nomad/app/optimade/models.py
+++ b/nomad/app/optimade/models.py
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 All the API flask restplus models.
-"""
+'''
 
 from typing import Set
 from flask_restplus import fields
@@ -23,7 +23,7 @@ import math
 
 from nomad import config
 from nomad.app.common import RFC3339DateTime
-from nomad.datamodel import CalcWithMetadata
+from nomad.datamodel import EntryMetadata
 
 from .api import api, base_url, url
 
@@ -235,7 +235,7 @@ json_api_calculation_info_model = api.model('CalculationInfo', {
 
 
 class CalculationDataObject:
-    def __init__(self, calc: CalcWithMetadata, request_fields: Set[str] = None):
+    def __init__(self, calc: EntryMetadata, request_fields: Set[str] = None):
 
         def include(key):
             if request_fields is None or (key in request_fields):
@@ -243,7 +243,7 @@ class CalculationDataObject:
 
             return False
 
-        attrs = {key: value for key, value in calc['optimade'].items() if include(key)}
+        attrs = {key: value for key, value in calc.dft.optimade.m_to_dict().items() if include(key)}
 
         self.type = 'calculation'
         self.id = calc.calc_id
diff --git a/nomad/archive.py b/nomad/archive.py
index f68c5d8539..67963f1d02 100644
--- a/nomad/archive.py
+++ b/nomad/archive.py
@@ -33,12 +33,12 @@ class ArchiveError(Exception):
 
 
 class TOCPacker(Packer):
-    """
+    '''
     A special msgpack packer that records a TOC while packing.
 
     Uses a combination of the pure python msgpack fallback packer and the "real"
     c-based packing.
-    """
+    '''
     def __init__(self, toc_depth: int, *args, **kwargs):
         self.toc_depth = toc_depth
         self.toc: Dict[str, Any] = None
@@ -403,7 +403,7 @@ class ArchiveReader(ArchiveObject):
 def write_archive(
         path_or_file: Union[str, BytesIO], n_entries: int, data: Iterable[Tuple[str, Any]],
         entry_toc_depth: int = 2) -> None:
-    """
+    '''
     Writes a msgpack-based archive file. The file contents will be a valid msgpack-object.
     The data will contain extra table-of-contents (TOC) objects that map some keys to
     positions in the file. Data can be partially read from these positions and deserialized
@@ -456,14 +456,14 @@ def write_archive(
         data: The file contents as an iterator of entry id, data tuples.
         entry_toc_depth: The depth of the table of contents in each entry. Only objects will
             count for calculating the depth.
-    """
+    '''
     with ArchiveWriter(path_or_file, n_entries, entry_toc_depth=entry_toc_depth) as writer:
         for uuid, entry in data:
             writer.add(uuid, entry)
 
 
 def read_archive(file_or_path: str, **kwargs) -> ArchiveReader:
-    """
+    '''
     Allows reading a msgpack-based archive.
 
     Arguments:
@@ -475,7 +475,7 @@ def read_archive(file_or_path: str, **kwargs) -> ArchiveReader:
         A mapping (dict-like) that can be used to access the archive data. The mapping
         will lazily load data as it is used. The mapping needs to be closed or used within
         a 'with' statement to free the underlying file resource after use.
-    """
+    '''
 
     return ArchiveReader(file_or_path, **kwargs)
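
A usage sketch for the two functions above, assuming entry data are plain dicts
keyed by entry id ('example.msg' is a hypothetical path):

    from nomad.archive import read_archive, write_archive

    write_archive('example.msg', 2, [
        ('entry-1', {'section_run': {'energy': 1.0}}),
        ('entry-2', {'section_run': {'energy': 2.0}})])

    with read_archive('example.msg') as archive:
        # data is loaded lazily via the entry TOCs
        print(archive['entry-1']['section_run']['energy'])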
 
diff --git a/nomad/archive_query.py b/nomad/archive_query.py
index 893c39e099..cf1af8b3d5 100644
--- a/nomad/archive_query.py
+++ b/nomad/archive_query.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 Contains interfaces to the archive metainfo and query.
 
 In module ``ArchiveMetainfo``, the data is provided either from raw
@@ -32,7 +32,7 @@ and a query schema similar to the archive json format can be provided to filter
     metainfo = q.query()
     for c in metainfo.calcs:
         print(c.section_run.section_single_configuration_calculation[0]({'energy_total':'*'}))
-"""
+'''
 
 import numpy as np
 import requests
@@ -47,11 +47,11 @@ from nomad.cli.client.client import KeycloakAuthenticator
 
 
 class ArchiveMetainfo:
-    """
+    '''
     Converts archive data in json format to the new nomad metainfo model.
     Arguments:
         archive_data: the archive data in json format
-    """
+    '''
     def __init__(self, archive_data: List[Dict[str, Any]]):
         self._archive_data = archive_data
         self.metainfo = None
@@ -107,10 +107,10 @@ class ArchiveMetainfo:
 
     @property
     def calcs(self):
-        """
+        '''
         Calculations in metainfo form, which can be actively queried by using the get
         functionality and providing a schema.
-        """
+        '''
         if not self._calcs:
             self._init_calcs()
         for calc_id, calc in self._calcs.items():
@@ -126,9 +126,9 @@ class ArchiveMetainfo:
 
     @property
     def base_metacls(self):
-        """
+        '''
         The base metaclass to apply to a calculation.
-        """
+        '''
         if self._base_metacls is None:
             name = self._prefix
             self._base_metacls = self._build_meta_cls(self.base_data, name)
diff --git a/nomad/cli/__init__.py b/nomad/cli/__init__.py
index a04890b79d..7fb192f395 100644
--- a/nomad/cli/__init__.py
+++ b/nomad/cli/__init__.py
@@ -12,13 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 Command line interface (CLI) for nomad. Provides a group/sub-command structure (think git)
 that offers various functionality to the command line user.
 
 Use it from the command line with ``nomad --help`` or ``python -m nomad.cli --help`` to learn
 more.
-"""
+'''
 
 from nomad.utils import POPO
 
diff --git a/nomad/cli/admin/admin.py b/nomad/cli/admin/admin.py
index c44107b25c..8ee72493e5 100644
--- a/nomad/cli/admin/admin.py
+++ b/nomad/cli/admin/admin.py
@@ -158,9 +158,7 @@ def lift_embargo(dry, parallel):
                 uploads_to_repack.append(upload)
                 upload.save()
 
-                upload_with_metadata = upload.to_upload_with_metadata()
-                calcs = upload_with_metadata.calcs
-                search.index_all(calcs)
+                search.index_all(upload.entries_metadata())
 
     if not dry:
         __run_processing(uploads_to_repack, parallel, lambda upload: upload.re_pack(), 're-packing')
@@ -182,8 +180,8 @@ def index(threads, dry):
             for calc in proc.Calc.objects():
                 eta.add()
                 entry = None
-                entry = search.Entry.from_calc_with_metadata(
-                    datamodel.CalcWithMetadata(**calc.metadata))
+                entry = search.create_entry(
+                    datamodel.EntryMetadata.m_from_dict(calc.metadata))
                 entry = entry.to_dict(include_meta=True)
                 entry['_op_type'] = 'index'
                 yield entry
@@ -335,20 +333,20 @@ AllowEncodedSlashes On
 
 
 def write_prototype_data_file(aflow_prototypes: dict, filepath) -> None:
-    """Writes the prototype data file in a compressed format to a python
+    '''Writes the prototype data file in a compressed format to a python
     module.
 
     Args:
         aflow_prototypes
-    """
+    '''
     class NoIndent(object):
         def __init__(self, value):
             self.value = value
 
     class NoIndentEncoder(json.JSONEncoder):
-        """A custom JSON encoder that can pretty-print objects wrapped in the
+        '''A custom JSON encoder that can pretty-print objects wrapped in the
         NoIndent class.
-        """
+        '''
         def __init__(self, *args, **kwargs):
             super(NoIndentEncoder, self).__init__(*args, **kwargs)
             self.kwargs = dict(kwargs)
diff --git a/nomad/cli/admin/migration.py b/nomad/cli/admin/migration.py
index 72a4de7d7e..a515b66dd7 100644
--- a/nomad/cli/admin/migration.py
+++ b/nomad/cli/admin/migration.py
@@ -20,7 +20,7 @@ import datetime
 import json
 
 from nomad import utils, processing as proc, search
-from nomad.datamodel import CalcWithMetadata
+from nomad.datamodel import EntryMetadata
 from nomad.cli.client.mirror import transform_reference, tarnsform_user_id, transform_dataset
 
 
@@ -28,14 +28,14 @@ __logger = utils.get_logger(__name__)
 
 
 class SourceCalc(Document):
-    """
+    '''
     Mongo document used as a calculation, upload, and metadata db and index
     built from a given source db. Each :class:`SourceCalc` entry relates
     a pid, mainfile, and upload "id" with each other for a corresponding calculation.
     It might also contain the user metadata. The uploads are "id"ed via the
     specific path segment that identifies an upload on the CoE repo FS(s) without
     any prefixes (e.g. $EXTRACTED, /data/upload, etc.).
-    """
+    '''
     pid = IntField(primary_key=True)
     mainfile = StringField()
     upload = StringField()
@@ -53,14 +53,14 @@ class SourceCalc(Document):
 
 
 def update_user_metadata(bulk_size: int = 1000, update_index: bool = False, **kwargs):
-    """ Goes through the whole source index to sync differences between repo user metadata
+    ''' Goes through the whole source index to sync differences between repo user metadata
     and metadata in fairdi.
 
     It goes through the source index calc by calc, working in bulks, getting the same
     data for fairdi and updating the differing calcs in mongo. It will only update user metadata.
 
     Uses kwargs as filters for the used source index query.
-    """
+    '''
     logger = utils.get_logger(__name__)
     start_time = time.time()
 
@@ -96,7 +96,7 @@ def update_user_metadata(bulk_size: int = 1000, update_index: bool = False, **kw
                         important_changes['missing_calcs'].setdefault(source.upload, []).append(source.pid)
                         continue
 
-                target_metadata = CalcWithMetadata(**target.metadata)
+                target_metadata = EntryMetadata(**target.metadata)
                 source_metadata_normalized: Dict[str, Any] = dict(
                     comment=source.metadata.get('comment'),
                     references={transform_reference(ref) for ref in source.metadata['references']},
diff --git a/nomad/cli/admin/uploads.py b/nomad/cli/admin/uploads.py
index 7d7feb6271..3fe4d3e87f 100644
--- a/nomad/cli/admin/uploads.py
+++ b/nomad/cli/admin/uploads.py
@@ -144,8 +144,7 @@ def chown(ctx, username, uploads):
 
     for upload in uploads:
         upload.user_id = user.user_id
-        upload_with_metadata = upload.to_upload_with_metadata()
-        calcs = upload_with_metadata.calcs
+        calcs = upload.entries_metadata()
 
         def create_update(calc):
             return UpdateOne(
@@ -155,8 +154,7 @@ def chown(ctx, username, uploads):
         proc.Calc._get_collection().bulk_write([create_update(calc) for calc in calcs])
         upload.save()
 
-        upload_with_metadata = upload.to_upload_with_metadata()
-        calcs = upload_with_metadata.calcs
+        calcs = upload.entries_metadata()
         search.index_all(calcs, do_refresh=False)
         search.refresh()
 
@@ -194,8 +192,7 @@ def index(ctx, uploads):
 
     i, failed = 0, 0
     for upload in uploads:
-        upload_with_metadata = upload.to_upload_with_metadata()
-        calcs = upload_with_metadata.calcs
+        calcs = upload.entries_metadata()
         failed += search.index_all(calcs)
         i += 1
 
diff --git a/nomad/cli/client/client.py b/nomad/cli/client/client.py
index 48c7265a76..d846a42f8f 100644
--- a/nomad/cli/client/client.py
+++ b/nomad/cli/client/client.py
@@ -65,7 +65,7 @@ def __create_client(
         user: str = nomad_config.client.user,
         password: str = nomad_config.client.password,
         ssl_verify: bool = True, use_token: bool = True):
-    """ A factory method to create the client. """
+    ''' A factory method to create the client. '''
     if not ssl_verify:
         import warnings
         warnings.filterwarnings("ignore")
diff --git a/nomad/cli/client/integrationtests.py b/nomad/cli/client/integrationtests.py
index 1baca89ed3..f22dde7a6f 100644
--- a/nomad/cli/client/integrationtests.py
+++ b/nomad/cli/client/integrationtests.py
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 A command that runs some example operations on a working nomad@FAIRDI installation
 as a final integration test.
-"""
+'''
 
 import time
 import os
diff --git a/nomad/cli/client/local.py b/nomad/cli/client/local.py
index e6001c09ca..427210522e 100644
--- a/nomad/cli/client/local.py
+++ b/nomad/cli/client/local.py
@@ -24,7 +24,7 @@ import bravado.exception
 
 from nomad import config, utils
 from nomad.files import ArchiveBasedStagingUploadFiles
-from nomad.datamodel import CalcWithMetadata
+from nomad.datamodel import EntryMetadata
 from nomad.parsing import LocalBackend
 from nomad.cli.parse import parse, normalize, normalize_all
 
@@ -32,7 +32,7 @@ from .client import client
 
 
 class CalcProcReproduction:
-    """
+    '''
     Instances represent a local reproduction of the processing for a single calculation.
     It allows downloading raw data from a nomad server and reproducing its processing
     (parsing, normalizing) with the locally installed parsers and normalizers.
@@ -44,7 +44,7 @@ class CalcProcReproduction:
     Arguments:
         calc_id: The calc_id of the calculation to locally process.
         override: Set to true to override any existing local calculation data.
-    """
+    '''
     def __init__(self, archive_id: str, override: bool = False, mainfile: str = None) -> None:
         if '/' in archive_id:
             self.calc_id = utils.archive.calc_id(archive_id)
@@ -125,25 +125,25 @@ class CalcProcReproduction:
         self.upload_files.delete()
 
     def parse(self, parser_name: str = None, **kwargs) -> LocalBackend:
-        """
+        '''
         Run the given parser on the downloaded calculation. If no parser is given,
         do parser matching and use the respective parser.
-        """
+        '''
         return parse(self.mainfile, self.upload_files, parser_name=parser_name, logger=self.logger, **kwargs)
 
     def normalize(self, normalizer: Union[str, Callable], parser_backend: LocalBackend = None):
-        """
+        '''
         Parse the downloaded calculation and run the given normalizer.
-        """
+        '''
         if parser_backend is None:
             parser_backend = self.parse()
 
         return normalize(parser_backend=parser_backend, normalizer=normalizer, logger=self.logger)
 
     def normalize_all(self, parser_backend: LocalBackend = None):
-        """
+        '''
         Parse the downloaded calculation and run the whole normalizer chain.
-        """
+        '''
         return normalize_all(parser_backend=parser_backend, logger=self.logger)
 
 
@@ -173,6 +173,6 @@ def local(calc_id, show_backend, show_metadata, skip_normalizers, not_strict, **
             backend.write_json(sys.stdout, pretty=True)
 
         if show_metadata:
-            metadata = CalcWithMetadata(domain=local.parser.domain)
+            metadata = EntryMetadata(domain=local.parser.domain)
             metadata.apply_domain_metadata(backend)
-            ujson.dump(metadata.to_dict(), sys.stdout, indent=4)
+            ujson.dump(metadata.m_to_dict(), sys.stdout, indent=4)
diff --git a/nomad/cli/client/mirror.py b/nomad/cli/client/mirror.py
index 85b388d65a..d3f07fe9ba 100644
--- a/nomad/cli/client/mirror.py
+++ b/nomad/cli/client/mirror.py
@@ -30,7 +30,7 @@ from .client import client
 
 
 __in_test = False
-""" Will be monkeypatched by tests to alter behavior for testing. """
+''' Will be monkeypatched by tests to alter behavior for testing. '''
 
 _Dataset = Dataset.m_def.m_x('me').me_cls
 __logger = utils.get_logger(__name__)
@@ -82,7 +82,7 @@ def transform_reference(reference):
 
 
 def v0Dot6(upload_data):
-    """ Inplace transforms v0.6.x upload data into v0.7.x upload data. """
+    ''' Transforms v0.6.x upload data into v0.7.x upload data in place. '''
     upload = json.loads(upload_data.upload)
     upload['user_id'] = tarnsform_user_id(upload['user_id'])
     upload_data.upload = json.dumps(upload)
@@ -318,7 +318,7 @@ def mirror(
             proc.Calc._get_collection().insert(upload_data.calcs)
 
             # index es
-            search.index_all(upload.to_upload_with_metadata().calcs)
+            search.index_all(upload.entries_metadata())
 
         print(
             'Mirrored %s with %d calcs at %s' %
diff --git a/nomad/cli/client/statistics.py b/nomad/cli/client/statistics.py
index 58339bc2f2..1878c5f2a4 100644
--- a/nomad/cli/client/statistics.py
+++ b/nomad/cli/client/statistics.py
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 A command that generates various statistics.
-"""
+'''
 
 from matplotlib import scale as mscale
 from matplotlib import transforms as mtransforms
diff --git a/nomad/cli/client/update_database.py b/nomad/cli/client/update_database.py
index 703796fc2a..4d1f44bdfc 100644
--- a/nomad/cli/client/update_database.py
+++ b/nomad/cli/client/update_database.py
@@ -12,12 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 Automatically synchronizes nomad with a given database. It creates a list of paths
 to mainfiles in nomad and compares it with paths in the external database. The missing
 paths in nomad will then be downloaded from the external database and subsequently
 uploaded to nomad. The downloaded files are by default saved in '/nomad/fairdi/external'.
-"""
+'''
 
 import requests
 import re
diff --git a/nomad/cli/client/upload.py b/nomad/cli/client/upload.py
index fd1ac0848d..a37fe80853 100644
--- a/nomad/cli/client/upload.py
+++ b/nomad/cli/client/upload.py
@@ -41,7 +41,7 @@ def stream_upload_with_client(client, stream, name=None):
 
 
 def upload_file(file_path: str, name: str = None, offline: bool = False, publish: bool = False, client=None):
-    """
+    '''
     Upload a file to nomad.
 
     Arguments:
@@ -51,7 +51,7 @@ def upload_file(file_path: str, name: str = None, offline: bool = False, publish
         publish: automatically publish after successful processing
 
     Returns: The upload_id
-    """
+    '''
     if client is None:
         from nomad.cli.client import create_client
         client = create_client()
diff --git a/nomad/cli/parse.py b/nomad/cli/parse.py
index f1faaa30cb..be462f842c 100644
--- a/nomad/cli/parse.py
+++ b/nomad/cli/parse.py
@@ -8,7 +8,7 @@ from nomad import config, utils, files
 from nomad.parsing import LocalBackend, parser_dict, match_parser, MatchingParser, MetainfoBackend
 from nomad.metainfo.legacy import LegacyMetainfoEnvironment
 from nomad.normalizing import normalizers
-from nomad.datamodel import CalcWithMetadata
+from nomad.datamodel import EntryMetadata
 
 from nomadcore import simple_parser
 
@@ -20,10 +20,10 @@ def parse(
         parser_name: str = None,
         backend_factory: Callable = None,
         strict: bool = True, logger=None) -> LocalBackend:
-    """
+    '''
     Run the given parser on the downloaded calculation. If no parser is given,
     do parser matching and use the respective parser.
-    """
+    '''
     if logger is None:
         logger = utils.get_logger(__name__)
     if parser_name is not None:
@@ -87,9 +87,9 @@ def normalize(
 
 
 def normalize_all(parser_backend: LocalBackend = None, logger=None) -> LocalBackend:
-    """
+    '''
     Parse the downloaded calculation and run the whole normalizer chain.
-    """
+    '''
     for normalizer in normalizers:
         parser_backend = normalize(normalizer, parser_backend=parser_backend, logger=logger)
 
@@ -129,6 +129,6 @@ def _parse(
     if show_backend:
         backend.write_json(sys.stdout, pretty=True)
     if show_metadata:
-        metadata = CalcWithMetadata(domain='dft')  # TODO take domain from matched parser
+        metadata = EntryMetadata(domain='dft')  # TODO take domain from matched parser
         metadata.apply_domain_metadata(backend)
-        json.dump(metadata.to_dict(), sys.stdout, indent=4)
+        json.dump(metadata.m_to_dict(), sys.stdout, indent=4)
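
The pattern of this change recurs throughout the patch: the hand-written
``CalcWithMetadata.to_dict`` becomes the generic metainfo serialization on
``EntryMetadata``. A minimal sketch:

    from nomad.datamodel import EntryMetadata

    metadata = EntryMetadata(domain='dft')
    print(metadata.m_to_dict())  # metainfo2 serialization, replaces to_dict()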
diff --git a/nomad/config.py b/nomad/config.py
index 06bcb228ee..e195cc6253 100644
--- a/nomad/config.py
+++ b/nomad/config.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 This module describes all configurable parameters for the nomad python code. The
 configuration is used for all executed python code including API, worker, CLI, and other
 scripts. To use the configuration in your own scripts or new modules, simply import
@@ -30,7 +30,7 @@ over defaults.
 .. autoclass:: nomad.config.NomadConfig
 .. autofunction:: nomad.config.apply
 .. autofunction:: nomad.config.load_config
-"""
+'''
 
 import logging
 import os
@@ -46,10 +46,10 @@ warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
 
 
 class NomadConfig(dict):
-    """
+    '''
     A class for configuration categories. It is a dict subclass that uses attributes as
     key/value pairs.
-    """
+    '''
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
 
@@ -246,11 +246,11 @@ logger = logging.getLogger(__name__)
 
 
 def apply(key, value) -> None:
-    """
+    '''
     Changes the config according to the given key and value. Keys are interpreted as paths
     to config values with ``_`` as separator, e.g. ``fs_staging`` leads to
     ``config.fs.staging``.
-    """
+    '''
     path = list(reversed(key.split('_')))
     child_segment = None
     current_value = None
@@ -299,13 +299,13 @@ def apply(key, value) -> None:
 
 
 def load_config(config_file: str = os.environ.get('NOMAD_CONFIG', 'nomad.yaml')) -> None:
-    """
+    '''
     Loads the configuration from the ``config_file`` and environment.
 
     Arguments:
         config_file: Override the configfile, default is file stored in env variable
             NOMAD_CONFIG or ``nomad.yaml``.
-    """
+    '''
     # load yaml and override defaults (only when not in test)
     if os.path.exists(config_file):
         with open(config_file, 'r') as stream:
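
A hedged example of the path interpretation described in the ``apply``
docstring (the value is made up):

    from nomad import config
    from nomad.config import apply

    apply('fs_staging', '/tmp/staging')  # '_' separates the path segments
    assert config.fs.staging == '/tmp/staging'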
diff --git a/nomad/datamodel/__init__.py b/nomad/datamodel/__init__.py
index ef64360237..c4f152be38 100644
--- a/nomad/datamodel/__init__.py
+++ b/nomad/datamodel/__init__.py
@@ -12,39 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 This module contains classes that allow to represent the core
-nomad data entities :class:`Upload` and :class:`Calc` on a high level of abstraction
+nomad data entities (entries/calculations, users, datasets) on a high level of abstraction
 independent from their representation in the different modules
 :py:mod:`nomad.processing`, :py:mod:`nomad.coe_repo`, :py:mod:`nomad.parsing`,
 :py:mod:`nomad.search`, :py:mod:`nomad.app`, :py:mod:`nomad.migration`.
 
 It is not about representing every detail, but those parts that are directly involved in
-api, processing, migration, mirroring, or other 'infrastructure' operations.
-
-Transformations between different implementations of the same entity can be build
-and used. To ease the number of necessary transformations the classes
-:class:`UploadWithMetadata` and :class:`CalcWithMetadata` can act as intermediate
-representations. Therefore, implement only transformation from and to these
-classes. These are the implemented transformations:
-
-.. image:: datamodel_transformations.png
-
-.. autoclass:: nomad.datamodel.UploadWithMetadata
-    :members:
-.. autoclass:: nomad.datamodel.CalcWithMetadata
-    :members:
-
-The class :class:`CalcWithMetadata` only defines non domain specific metadata quantities
-about ids, user metadata, etc. To define domain specific quantities :class:`CalcWithMetadata`
-must be subclassed. The classes
-:class:`Domain` and :class:`DomainQuantity` can be used to further define domain specific
-quantities.
-
-.. autoclass:: nomad.datamodel.Domain
-    :members:
-.. autoclass:: nomad.datamodel.DomainQuantity
-    :members:
+api, processing, mirroring, or other 'infrastructure' operations.
 
 The class :class:`User` is used to represent users and their attributes.
 
@@ -55,12 +31,33 @@ The class :class:`Dataset` is used to represent datasets and their attributes.
 
 .. autoclass:: nomad.datamodel.Dataset
     :members:
-"""
 
-import sys
+The class :class:`UserMetadata` is used to represent user-determined entry metadata.
+
+.. autoclass:: nomad.datamodel.UserMetadata
+    :members:
+
+The class :class:`EntryMetadata` is used to represent all metadata about an entry.
 
-from nomad.datamodel.base import UploadWithMetadata, CalcWithMetadata, Domain, DomainQuantity
-from nomad.datamodel import ems, dft
-from nomad.datamodel.dft import DFTCalcWithMetadata
-from nomad.datamodel.ems import EMSEntryWithMetadata
-from nomad.datamodel.metainfo import Dataset, User, UserMetadata
+.. autoclass:: nomad.datamodel.EntryMetadata
+    :members:
+'''
+
+from .dft import DFTMetadata
+from .ems import EMSMetadata
+from .metainfo import Dataset, User, EditableUserMetadata, UserMetadata, EntryMetadata
+
+domains = {
+    'dft': {
+        'metadata': DFTMetadata,
+        'metainfo_all_package': 'all.nomadmetainfo.json',
+        'root_section': 'section_run'
+    },
+    'ems': {
+        'metadata': EMSMetadata,
+        'metainfo_all_package': 'all.experimental.nomadmetainfo.json',
+        'root_section': 'section_experiment'
+    }
+}
+
+root_sections = [domain['root_section'] for domain in domains.values()] + ['section_entry_info']
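
With the ``Domain`` class retired (see nomad/datamodel/base.py below), domain
specifics are resolved through this plain dict; a short sketch of the intended
lookup:

    from nomad.datamodel import domains

    dft = domains['dft']
    metadata_cls = dft['metadata']  # the DFTMetadata section class
    print(dft['root_section'])      # 'section_run'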
diff --git a/nomad/datamodel/base.py b/nomad/datamodel/base.py
index 5dd084ebc9..99705430c0 100644
--- a/nomad/datamodel/base.py
+++ b/nomad/datamodel/base.py
@@ -12,508 +12,335 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Iterable, List, Dict, Type, Tuple, Callable, Any
-import datetime
-from elasticsearch_dsl import Keyword, Integer
-from collections.abc import Mapping
 import numpy as np
-import ase.data
 
 from nomad import config
 
-from .metainfo import Dataset, User
-
-
-class UploadWithMetadata():
-    """
-    See :class:`CalcWithMetadata`.
-    """
-
-    def __init__(self, **kwargs):
-        self.upload_id: str = None
-        self.uploader: str = None
-        self.upload_time: datetime.datetime = None
-
-        self.calcs: Iterable['CalcWithMetadata'] = list()
-
-        for key, value in kwargs.items():
-            setattr(self, key, value)
-
-    @property
-    def calcs_dict(self) -> Dict[str, 'CalcWithMetadata']:
-        return {calc.calc_id: calc for calc in self.calcs}
-
-
-class CalcWithMetadata(Mapping):
-    """
-    A dict/POPO class that can be used for mapping calc representations with calc metadata.
-    We have multi representations of calcs and their calc metadata. To avoid implement
-    mappings between all combinations, just implement mappings with the class and use
-    mapping transitivity. E.g. instead of A -> B, A -> this -> B.
-
-    This is basically an abstract class and it has to be subclassed for each :class:`Domain`.
-    Subclasses can define additional attributes and have to implement :func:`apply_domain_metadata`
-    to fill these attributes from processed entries, i.e. instance of :class:`nomad.parsing.LocalBackend`.
-
-    Attributes:
-        domain: Must be the key for a registered domain. This determines which actual
-            subclass is instantiated.
-        upload_id: The ``upload_id`` of the calculations upload (random UUID).
-        calc_id: The unique mainfile based calculation id.
-        calc_hash: The raw file content based checksum/hash of this calculation.
-        pid: The unique persistent id of this calculation.
-        mainfile: The upload relative mainfile path.
-
-        files: A list of all files, relative to upload.
-        upload_time: The time when the calc was uploaded.
-        uploader: An object describing the uploading user, has at least ``user_id``
-        processed: Boolean indicating if this calc was successfully processed and archive
-            data and calc metadata is available.
-        last_processing: A datatime with the time of the last successful processing.
-        nomad_version: A string that describes the version of the nomad software that was
-            used to do the last successful processing.
-
-        with_embargo: Show if user set an embargo on the calculation.
-        coauthors: List of coauther user objects with at ``user_id``.
-        shared_with: List of users this calcs ownership is shared with, objects with at ``user_id``.
-        comment: String comment.
-        references: Objects describing user provided references, keys are ``id`` and ``value``.
-        datasets: A list of dataset ids. The corresponding :class:`Dataset`s must exist.
-    """
-
-    def __new__(cls, domain: str = None, **kwargs):
-        if domain is not None:
-            domain_obj = Domain.instances.get(domain)
-            assert domain_obj is not None
-            return super().__new__(domain_obj.domain_entry_class)
-        else:
-            return super().__new__(cls)
-
-    def __init__(self, domain: str = None, **kwargs):
-        self.domain = domain
-
-        # id relevant metadata
-        self.upload_id: str = None
-        self.calc_id: str = None
-        self.calc_hash: str = None
-        self.mainfile: str = None
-        self.pid: int = None
-        self.raw_id: str = None
-
-        # basic upload and processing related metadata
-        self.upload_time: datetime.datetime = None
-        self.upload_name: str = None
-        self.files: List[str] = None
-        self.uploader: str = None
-        self.processed: bool = False
-        self.last_processing: datetime.datetime = None
-        self.nomad_version: str = None
-        self.nomad_commit: str = None
-
-        # user metadata, i.e. quantities given and editable by the user
-        self.with_embargo: bool = None
-        self.published: bool = False
-        self.coauthors: List[str] = []
-        self.shared_with: List[str] = []
-        self.comment: str = None
-        self.references: List[str] = []
-        self.datasets: List[str] = []
-        self.external_id: str = None
-        self.last_edit: datetime.datetime = None
-
-        # parser related general (not domain specific) metadata
-        self.parser_name = None
-
-        # domain generic metadata
-        self.formula: str = None
-        self.atoms: List[str] = []
-        self.n_atoms: int = 0
-
-        self.update(**kwargs)
-
-    def __getitem__(self, key):
-        value = getattr(self, key, None)
-
-        if value is None or key in ['backend']:
-            raise KeyError()
-
-        return value
-
-    def __iter__(self):
-        for key, value in self.__dict__.items():
-            if value is None or key in ['backend']:
-                continue
-
-            yield key
-
-    def __len__(self):
-        count = 0
-        for key, value in self.__dict__.items():
-            if value is None or key in ['backend']:
-                continue
-            count += 1
-
-        return count
-
-    def to_dict(self):
-        return {key: value for key, value in self.items()}
-
-    def __str__(self):
-        return str(self.to_dict())
-
-    def update(self, **kwargs):
-        for key, value in kwargs.items():
-            if value is None:
-                continue
-
-            setattr(self, key, value)
-
-    def apply_user_metadata(self, metadata: dict):
-        """
-        Applies a user provided metadata dict to this calc.
-        """
-        self.pid = metadata.get('_pid', self.pid)
-        self.comment = metadata.get('comment', self.comment)
-        self.upload_time = metadata.get('_upload_time', self.upload_time)
-        uploader_id = metadata.get('_uploader')
-        if uploader_id is not None:
-            self.uploader = uploader_id
-        self.references = metadata.get('references', [])
-        self.with_embargo = metadata.get('with_embargo', self.with_embargo)
-        self.coauthors = [
-            user_id for user_id in metadata.get('coauthors', self.coauthors)
-            if User.get(user_id=user_id) is not None]
-        self.shared_with = [
-            user_id for user_id in metadata.get('shared_with', self.shared_with)
-            if User.get(user_id=user_id) is not None]
-        self.datasets = [
-            dataset_id for dataset_id in metadata.get('datasets', self.datasets)
-            if Dataset.m_def.m_x('me').get(dataset_id=dataset_id) is not None]
-        self.external_id = metadata.get('external_id')
-
-    def apply_domain_metadata(self, backend):
-        raise NotImplementedError()
-
-
-class DomainQuantity:
-    """
-    This class can be used to define further details about a domain specific metadata
-    quantity.
-
-    Attributes:
-        name: The name of the quantity, also the key used to store values in
-            :class:`CalcWithMetadata`
-        description: A human friendly description. The description is used to define
-            the swagger documentation on the relevant API endpoints.
-        multi: Indicates a list of values. This is important for the elastic mapping.
-        order_default: Indicates that this metric should be used for the default order of
-            search results.
-        aggregations: Indicates that search aggregations (and how many) should be provided.
-            0 (the default) means no aggregations.
-        metric: Indicates that this quantity should be used as search metric. Values need
-            to be tuples with metric name and elastic aggregation (e.g. sum, cardinality)
-        elastic_mapping: An optional elasticsearch_dsl mapping. Default is ``Keyword``.
-        elastic_search_type: An optional elasticsearch search type. Default is ``term``.
-        elastic_field: An optional elasticsearch key. Default is the name of the quantity.
-        elastic_value: A collable that takes a :class:`CalcWithMetadata` as input and produces the
-            value for the elastic search index.
-        argparse_action: Action to use on argparse, either append or split for multi values. Append is default.
-    """
-
-    def __init__(
-            self, description: str = None, multi: bool = False, aggregations: int = 0,
-            order_default: bool = False, metric: Tuple[str, str] = None,
-            metadata_field: str = None, elastic_mapping: type = None,
-            elastic_search_type: str = 'term', elastic_field: str = None,
-            elastic_value: Callable[[Any], Any] = None,
-            argparse_action: str = 'append'):
-
-        self.domain: str = None
-        self._name: str = None
-        self.description = description
-        self.multi = multi
-        self.order_default = order_default
-        self.aggregations = aggregations
-        self.metric = metric
-        self.elastic_mapping = elastic_mapping
-        self.elastic_search_type = elastic_search_type
-        self.metadata_field = metadata_field
-        self.elastic_field = elastic_field
-        self.argparse_action = argparse_action
-
-        self.elastic_value = elastic_value
-        if self.elastic_value is None:
-            self.elastic_value = lambda o: o
-
-        if self.elastic_mapping is None:
-            self.elastic_mapping = Keyword(multi=self.multi)
-
-    @property
-    def name(self) -> str:
-        return self._name
-
-    @name.setter
-    def name(self, name: str) -> None:
-        self._name = name
-        if self.metadata_field is None:
-            self.metadata_field = name
-        if self.elastic_field is None:
-            self.elastic_field = self.name
-
-    @property
-    def qualified_elastic_field(self) -> str:
-        if self.domain is None:
-            return self.elastic_field
-        else:
-            return '%s.%s' % (self.domain, self.elastic_field)
-
-    @property
-    def qualified_name(self) -> str:
-        if self.domain is None:
-            return self.name
-        else:
-            return '%s.%s' % (self.domain, self.name)
-
-
-def only_atoms(atoms):
-    numbers = [ase.data.atomic_numbers[atom] for atom in atoms]
-    only_atoms = [ase.data.chemical_symbols[number] for number in sorted(numbers)]
-    return ''.join(only_atoms)
-
-
-class Domain:
-    """
-    A domain defines all metadata quantities that are specific to a certain scientific
-    domain, e.g. DFT calculations, or experimental material science.
-
-    Each domain needs to define a subclass of :class:`CalcWithMetadata`. This
-    class has to define the necessary domain specific metadata quantities and how these
-    are filled from parser results (usually an instance of :class:LocalBackend).
-
-    Furthermore, the class method :func:`register_domain` of this ``Domain`` class has
-    to be used to register a domain with ``domain_nam``. This also allows to provide
-    further descriptions on each domain specific quantity via instance of :class:`DomainQuantity`.
-
-    While there can be multiple domains registered. Currently, only one domain can be
-    active. This active domain is define in the configuration using the ``domain_name``.
-
-    Arguments:
-        name: A name for the domain. This is used as key in the configuration ``config.domain``.
-        domain_entry_class: A subclass of :class:`CalcWithMetadata` that adds the
-            domain specific quantities.
-        quantities: Additional specifications for the quantities in ``domain_entry_class`` as
-            instances of :class:`DomainQuantity`.
-        metrics: Tuples of elastic field name and elastic aggregation operation that
-            can be used to create statistic values.
-        groups: Tuple of quantity name and metric that describes quantities that
-            can be used to group entries by quantity values.
-        root_sections: The name of the possible root sections for this domain.
-        metainfo_all_package: The name of the full metainfo package for this domain.
-    """
-    instances: Dict[str, 'Domain'] = {}
-
-    base_quantities = dict(
-        authors=DomainQuantity(
-            elastic_field='authors.name.keyword', multi=True, aggregations=1000,
-            description=(
-                'Search for the given author. Exact keyword matches in the form "Lastname, '
-                'Firstname".')),
-        uploader_id=DomainQuantity(
-            elastic_field='uploader.user_id', multi=False, aggregations=5,
-            description=('Search for the given uploader id.')),
-        uploader_name=DomainQuantity(
-            elastic_field='uploader.name.keyword', multi=False,
-            description=('Search for the exact uploader\'s full name')),
-        comment=DomainQuantity(
-            elastic_search_type='match', multi=True,
-            description='Search within the comments. This is a text search ala google.'),
-        paths=DomainQuantity(
-            elastic_search_type='match', elastic_field='files', multi=True,
-            description='Search for elements in one of the file paths. The paths are split at all "/".'),
-        files=DomainQuantity(
-            elastic_field='files.keyword', multi=True,
-            description='Search for exact file name with full path.'),
-        quantities=DomainQuantity(
-            multi=True,
-            description='Search for the existence of a certain meta-info quantity'),
-        upload_id=DomainQuantity(
-            description='Search for the upload_id.',
-            multi=True, argparse_action='split', elastic_search_type='terms'),
-        upload_time=DomainQuantity(
-            description='Search for the exact upload time.', elastic_search_type='terms'),
-        upload_name=DomainQuantity(
-            description='Search for the upload_name.',
-            multi=True, argparse_action='split', elastic_search_type='terms'),
-        calc_id=DomainQuantity(
-            description='Search for the calc_id.',
-            multi=True, argparse_action='split', elastic_search_type='terms'),
-        pid=DomainQuantity(
-            description='Search for the pid.',
-            multi=True, argparse_action='split', elastic_search_type='terms'),
-        raw_id=DomainQuantity(
-            description='Search for the raw_id.',
-            multi=True, argparse_action='split', elastic_search_type='terms'),
-        mainfile=DomainQuantity(
-            description='Search for the mainfile.',
-            multi=True, argparse_action='append', elastic_search_type='terms'),
-        external_id=DomainQuantity(
-            description='External user provided id. Does not have to be unique necessarily.',
-            multi=True, argparse_action='split', elastic_search_type='terms'),
-        calc_hash=DomainQuantity(
-            description='Search for the entries hash.',
-            multi=True, argparse_action='split', elastic_search_type='terms'),
-        dataset=DomainQuantity(
-            elastic_field='datasets.name', multi=True, elastic_search_type='match',
-            description='Search for a particular dataset by name.'),
-        dataset_id=DomainQuantity(
-            elastic_field='datasets.id', multi=True,
-            description='Search for a particular dataset by its id.'),
-        doi=DomainQuantity(
-            elastic_field='datasets.doi', multi=True,
-            description='Search for a particular dataset by doi (incl. http://dx.doi.org).'),
-        formula=DomainQuantity(
-            'The chemical (hill) formula of the simulated system.',
-            order_default=True),
-        atoms=DomainQuantity(
-            'The atom labels of all atoms in the simulated system.',
-            aggregations=len(ase.data.chemical_symbols), multi=True),
-        only_atoms=DomainQuantity(
-            'The atom labels concatenated in species-number order. Used with keyword search '
-            'to facilitate exclusive searches.',
-            elastic_value=only_atoms, metadata_field='atoms', multi=True),
-        n_atoms=DomainQuantity(
-            'Number of atoms in the simulated system',
-            elastic_mapping=Integer()))
-
-    base_metrics = dict(
-        datasets=('dataset_id', 'cardinality'),
-        uploads=('upload_id', 'cardinality'),
-        uploaders=('uploader_name', 'cardinality'),
-        authors=('authors', 'cardinality'),
-        unique_entries=('calc_hash', 'cardinality'))
-
-    base_groups = dict(
-        datasets=('dataset_id', 'datasets'),
-        uploads=('upload_id', 'uploads'))
-
-    @classmethod
-    def get_quantity(cls, name_spec) -> DomainQuantity:
-        """
-        Returns the quantity definition for the given quantity name. The name can be the
-        qualified name (``domain.quantity``) or in Django-style (``domain__quantity``).
-        """
-        qualified_name = name_spec.replace('__', '.')
-        split_name = qualified_name.split('.')
-        if len(split_name) == 1:
-            return cls.base_quantities[split_name[0]]
-        elif len(split_name) == 2:
-            return cls.instances[split_name[0]].quantities[split_name[1]]
-        else:
-            assert False, 'qualified quantity name depth must be 2 max'
-
-    @classmethod
-    def all_quantities(cls) -> Iterable[DomainQuantity]:
-        return set([quantity for domain in cls.instances.values() for quantity in domain.quantities.values()])
-
-    def __init__(
-            self, name: str, domain_entry_class: Type[CalcWithMetadata],
-            quantities: Dict[str, DomainQuantity],
-            metrics: Dict[str, Tuple[str, str]],
-            groups: Dict[str, Tuple[str, str]],
-            default_statistics: List[str],
-            root_sections=['section_run', 'section_entry_info'],
-            metainfo_all_package='all.nomadmetainfo.json') -> None:
-
-        domain_quantities = quantities
-
-        Domain.instances[name] = self
-
-        self.name = name
-        self.domain_entry_class = domain_entry_class
-        self.domain_quantities: Dict[str, DomainQuantity] = {}
-        self.root_sections = root_sections
-        self.metainfo_all_package = metainfo_all_package
-        self.default_statistics = default_statistics
-
-        reference_domain_calc = CalcWithMetadata(domain=name)
-        reference_general_calc = CalcWithMetadata(domain=None)
-
-        # add non specified quantities from additional metadata class fields
-        for quantity_name in reference_domain_calc.__dict__.keys():
-            if not hasattr(reference_general_calc, quantity_name):
-                quantity = domain_quantities.get(quantity_name, None)
-                if quantity is None:
-                    domain_quantities[quantity_name] = DomainQuantity()
-
-        # ensure domain quantity names and domains
-        for quantity_name, quantity in domain_quantities.items():
-            quantity.domain = name
-            quantity.name = quantity_name
-
-        # add domain prefix to domain metrics and groups
-        domain_metrics = {
-            '%s.%s' % (name, key): (quantities[quantity].qualified_elastic_field, es_op)
-            for key, (quantity, es_op) in metrics.items()}
-        domain_groups = {
-            '%s.%s' % (name, key): (quantities[quantity].qualified_name, '%s.%s' % (name, metric))
-            for key, (quantity, metric) in groups.items()}
-
-        # add all domain quantities
-        for quantity_name, quantity in domain_quantities.items():
-            self.domain_quantities[quantity.name] = quantity
-
-            # update the multi status from an example value
-            if quantity.metadata_field in reference_domain_calc.__dict__:
-                quantity.multi = isinstance(
-                    reference_domain_calc.__dict__[quantity.metadata_field], list)
-
-            assert not hasattr(reference_general_calc, quantity_name), \
-                'quantity overrides general non domain quantity: %s' % quantity_name
-
-        # construct search quantities from base and domain quantities
-        self.quantities = dict(**Domain.base_quantities)
-        for quantity_name, quantity in self.quantities.items():
-            quantity.name = quantity_name
-        self.quantities.update(self.domain_quantities)
-
-        assert any(quantity.order_default for quantity in Domain.instances[name].quantities.values()), \
-            'you need to define a order default quantity'
-
-        # construct metrics from base and domain metrics
-        self.metrics = dict(**Domain.base_metrics)
-        self.metrics.update(**domain_metrics)
-        self.groups = dict(**Domain.base_groups)
-        self.groups.update(**domain_groups)
-
-    @property
-    def metrics_names(self) -> Iterable[str]:
-        """ Just the names of all metrics. """
-        return list(self.metrics.keys())
-
-    @property
-    def aggregations(self) -> Dict[str, int]:
-        """
-        The search aggregations and the default maximum number of calculated buckets. See also
-        :func:`nomad.search.aggregations`.
-        """
-        return {
-            quantity.name: quantity.aggregations
-            for quantity in self.quantities.values()
-            if quantity.aggregations > 0
-        }
-
-    @property
-    def aggregations_names(self) -> Iterable[str]:
-        """ Just the names of all metrics. """
-        return list(self.aggregations.keys())
-
-    @property
-    def order_default_quantity(self) -> str:
-        for quantity in self.quantities.values():
-            if quantity.order_default:
-                return quantity.qualified_name
-
-        assert False, 'each domain must defina an order_default quantity'
+# from .metainfo import Dataset, User, EntryMetadata
+
+
+# class DomainQuantity:
+#     '''
+#     This class can be used to define further details about a domain specific metadata
+#     quantity.
+
+#     Attributes:
+#         name: The name of the quantity, also the key used to store values in
+#             :class:`EntryMetadata`
+#         description: A human friendly description. The description is used to define
+#             the swagger documentation on the relevant API endpoints.
+#         multi: Indicates a list of values. This is important for the elastic mapping.
+#         order_default: Indicates that this metric should be used for the default order of
+#             search results.
+#         aggregations: Indicates that search aggregations (and how many) should be provided.
+#             0 (the default) means no aggregations.
+#         metric: Indicates that this quantity should be used as search metric. Values need
+#             to be tuples with metric name and elastic aggregation (e.g. sum, cardinality)
+#         elastic_mapping: An optional elasticsearch_dsl mapping. Default is ``Keyword``.
+#         elastic_search_type: An optional elasticsearch search type. Default is ``term``.
+#         elastic_field: An optional elasticsearch key. Default is the name of the quantity.
+#         elastic_value: A callable that takes a :class:`EntryMetadata` as input and produces the
+#             value for the elastic search index.
+#         argparse_action: Action to use on argparse, either append or split for multi values. Append is default.
+#     '''
+
+#     def __init__(
+#             self, description: str = None, multi: bool = False, aggregations: int = 0,
+#             order_default: bool = False, metric: Tuple[str, str] = None,
+#             metadata_field: str = None, elastic_mapping: type = None,
+#             elastic_search_type: str = 'term', elastic_field: str = None,
+#             elastic_value: Callable[[Any], Any] = None,
+#             argparse_action: str = 'append'):
+
+#         self.domain: str = None
+#         self._name: str = None
+#         self.description = description
+#         self.multi = multi
+#         self.order_default = order_default
+#         self.aggregations = aggregations
+#         self.metric = metric
+#         self.elastic_mapping = elastic_mapping
+#         self.elastic_search_type = elastic_search_type
+#         self.metadata_field = metadata_field
+#         self.elastic_field = elastic_field
+#         self.argparse_action = argparse_action
+
+#         self.elastic_value = elastic_value
+#         if self.elastic_value is None:
+#             self.elastic_value = lambda o: o
+
+#         if self.elastic_mapping is None:
+#             self.elastic_mapping = Keyword(multi=self.multi)
+
+#     @property
+#     def name(self) -> str:
+#         return self._name
+
+#     @name.setter
+#     def name(self, name: str) -> None:
+#         self._name = name
+#         if self.metadata_field is None:
+#             self.metadata_field = name
+#         if self.elastic_field is None:
+#             self.elastic_field = self.name
+
+#     @property
+#     def qualified_elastic_field(self) -> str:
+#         if self.domain is None:
+#             return self.elastic_field
+#         else:
+#             return '%s.%s' % (self.domain, self.elastic_field)
+
+#     @property
+#     def qualified_name(self) -> str:
+#         if self.domain is None:
+#             return self.name
+#         else:
+#             return '%s.%s' % (self.domain, self.name)
+
+
+# def only_atoms(atoms):
+#     numbers = [ase.data.atomic_numbers[atom] for atom in atoms]
+#     only_atoms = [ase.data.chemical_symbols[number] for number in sorted(numbers)]
+#     return ''.join(only_atoms)
+
+
+# class Domain:
+#     '''
+#     A domain defines all metadata quantities that are specific to a certain scientific
+#     domain, e.g. DFT calculations, or experimental material science.
+
+#     Each domain needs to define a subclass of :class:`EntryMetadata`. This
+#     class has to define the necessary domain specific metadata quantities and how these
+#     are filled from parser results (usually an instance of :class:`LocalBackend`).
+
+#     Furthermore, the class method :func:`register_domain` of this ``Domain`` class has
+#     to be used to register a domain under a ``domain_name``. This also allows providing
+#     further descriptions for each domain specific quantity via instances of :class:`DomainQuantity`.
+
+#     While multiple domains can be registered, currently only one domain can be
+#     active. The active domain is defined in the configuration using the ``domain_name``.
+
+#     Arguments:
+#         name: A name for the domain. This is used as key in the configuration ``config.domain``.
+#         domain_entry_class: A subclass of :class:`EntryMetadata` that adds the
+#             domain specific quantities.
+#         quantities: Additional specifications for the quantities in ``domain_entry_class`` as
+#             instances of :class:`DomainQuantity`.
+#         metrics: Tuples of elastic field name and elastic aggregation operation that
+#             can be used to create statistic values.
+#         groups: Tuple of quantity name and metric that describes quantities that
+#             can be used to group entries by quantity values.
+#         root_sections: The names of the possible root sections for this domain.
+#         metainfo_all_package: The name of the full metainfo package for this domain.
+#     '''
+#     instances: Dict[str, 'Domain'] = {}
+
+#     base_quantities = dict(
+#         authors=DomainQuantity(
+#             elastic_field='authors.name.keyword', multi=True, aggregations=1000,
+#             description=(
+#                 'Search for the given author. Exact keyword matches in the form "Lastname, '
+#                 'Firstname".')),
+#         uploader_id=DomainQuantity(
+#             elastic_field='uploader.user_id', multi=False, aggregations=5,
+#             description=('Search for the given uploader id.')),
+#         uploader_name=DomainQuantity(
+#             elastic_field='uploader.name.keyword', multi=False,
+#             description=('Search for the exact uploader\'s full name')),
+#         comment=DomainQuantity(
+#             elastic_search_type='match', multi=True,
+#             description='Search within the comments. This is a text search ala google.'),
+#         paths=DomainQuantity(
+#             elastic_search_type='match', elastic_field='files', multi=True,
+#             description='Search for elements in one of the file paths. The paths are split at all "/".'),
+#         files=DomainQuantity(
+#             elastic_field='files.keyword', multi=True,
+#             description='Search for exact file name with full path.'),
+#         quantities=DomainQuantity(
+#             multi=True,
+#             description='Search for the existence of a certain meta-info quantity'),
+#         upload_id=DomainQuantity(
+#             description='Search for the upload_id.',
+#             multi=True, argparse_action='split', elastic_search_type='terms'),
+#         upload_time=DomainQuantity(
+#             description='Search for the exact upload time.', elastic_search_type='terms'),
+#         upload_name=DomainQuantity(
+#             description='Search for the upload_name.',
+#             multi=True, argparse_action='split', elastic_search_type='terms'),
+#         calc_id=DomainQuantity(
+#             description='Search for the calc_id.',
+#             multi=True, argparse_action='split', elastic_search_type='terms'),
+#         pid=DomainQuantity(
+#             description='Search for the pid.',
+#             multi=True, argparse_action='split', elastic_search_type='terms'),
+#         raw_id=DomainQuantity(
+#             description='Search for the raw_id.',
+#             multi=True, argparse_action='split', elastic_search_type='terms'),
+#         mainfile=DomainQuantity(
+#             description='Search for the mainfile.',
+#             multi=True, argparse_action='append', elastic_search_type='terms'),
+#         external_id=DomainQuantity(
+#             description='External user provided id. Does not have to be unique necessarily.',
+#             multi=True, argparse_action='split', elastic_search_type='terms'),
+#         calc_hash=DomainQuantity(
+#             description='Search for the entries hash.',
+#             multi=True, argparse_action='split', elastic_search_type='terms'),
+#         dataset=DomainQuantity(
+#             elastic_field='datasets.name', multi=True, elastic_search_type='match',
+#             description='Search for a particular dataset by name.'),
+#         dataset_id=DomainQuantity(
+#             elastic_field='datasets.id', multi=True,
+#             description='Search for a particular dataset by its id.'),
+#         doi=DomainQuantity(
+#             elastic_field='datasets.doi', multi=True,
+#             description='Search for a particular dataset by doi (incl. http://dx.doi.org).'),
+#         formula=DomainQuantity(
+#             'The chemical (hill) formula of the simulated system.',
+#             order_default=True),
+#         atoms=DomainQuantity(
+#             'The atom labels of all atoms in the simulated system.',
+#             aggregations=len(ase.data.chemical_symbols), multi=True),
+#         only_atoms=DomainQuantity(
+#             'The atom labels concatenated in species-number order. Used with keyword search '
+#             'to facilitate exclusive searches.',
+#             elastic_value=only_atoms, metadata_field='atoms', multi=True),
+#         n_atoms=DomainQuantity(
+#             'Number of atoms in the simulated system',
+#             elastic_mapping=Integer()))
+
+#     base_metrics = dict(
+#         datasets=('dataset_id', 'cardinality'),
+#         uploads=('upload_id', 'cardinality'),
+#         uploaders=('uploader_name', 'cardinality'),
+#         authors=('authors', 'cardinality'),
+#         unique_entries=('calc_hash', 'cardinality'))
+
+#     base_groups = dict(
+#         datasets=('dataset_id', 'datasets'),
+#         uploads=('upload_id', 'uploads'))
+
+#     @classmethod
+#     def get_quantity(cls, name_spec) -> DomainQuantity:
+#         '''
+#         Returns the quantity definition for the given quantity name. The name can be the
+#         qualified name (``domain.quantity``) or in Django-style (``domain__quantity``).
+#         '''
+#         qualified_name = name_spec.replace('__', '.')
+#         split_name = qualified_name.split('.')
+#         if len(split_name) == 1:
+#             return cls.base_quantities[split_name[0]]
+#         elif len(split_name) == 2:
+#             return cls.instances[split_name[0]].quantities[split_name[1]]
+#         else:
+#             assert False, 'qualified quantity name depth must be 2 max'
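+#         # Illustrative: both forms resolve the same quantity definition:
+#         #   Domain.get_quantity('dft.system')
+#         #   Domain.get_quantity('dft__system')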
+
+#     @classmethod
+#     def all_quantities(cls) -> Iterable[DomainQuantity]:
+#         return set([quantity for domain in cls.instances.values() for quantity in domain.quantities.values()])
+
+#     def __init__(
+#             self, name: str, domain_entry_class: Type[EntryMetadata],
+#             quantities: Dict[str, DomainQuantity],
+#             metrics: Dict[str, Tuple[str, str]],
+#             groups: Dict[str, Tuple[str, str]],
+#             default_statistics: List[str],
+#             root_sections=['section_run', 'section_entry_info'],
+#             metainfo_all_package='all.nomadmetainfo.json') -> None:
+
+#         domain_quantities = quantities
+
+#         Domain.instances[name] = self
+
+#         self.name = name
+#         self.domain_entry_class = domain_entry_class
+#         self.domain_quantities: Dict[str, DomainQuantity] = {}
+#         self.root_sections = root_sections
+#         self.metainfo_all_package = metainfo_all_package
+#         self.default_statistics = default_statistics
+
+#         # TODO
+#         return
+
+#         reference_domain_calc = EntryMetadata(domain=name)
+#         reference_general_calc = EntryMetadata(domain=None)
+
+#         # add non specified quantities from additional metadata class fields
+#         for quantity_name in reference_domain_calc.__dict__.keys():
+#             if not hasattr(reference_general_calc, quantity_name):
+#                 quantity = domain_quantities.get(quantity_name, None)
+#                 if quantity is None:
+#                     domain_quantities[quantity_name] = DomainQuantity()
+
+#         # ensure domain quantity names and domains
+#         for quantity_name, quantity in domain_quantities.items():
+#             quantity.domain = name
+#             quantity.name = quantity_name
+
+#         # add domain prefix to domain metrics and groups
+#         domain_metrics = {
+#             '%s.%s' % (name, key): (quantities[quantity].qualified_elastic_field, es_op)
+#             for key, (quantity, es_op) in metrics.items()}
+#         domain_groups = {
+#             '%s.%s' % (name, key): (quantities[quantity].qualified_name, '%s.%s' % (name, metric))
+#             for key, (quantity, metric) in groups.items()}
+
+#         # add all domain quantities
+#         for quantity_name, quantity in domain_quantities.items():
+#             self.domain_quantities[quantity.name] = quantity
+
+#             # update the multi status from an example value
+#             if quantity.metadata_field in reference_domain_calc.__dict__:
+#                 quantity.multi = isinstance(
+#                     reference_domain_calc.__dict__[quantity.metadata_field], list)
+
+#             assert not hasattr(reference_general_calc, quantity_name), \
+#                 'quantity overrides general non domain quantity: %s' % quantity_name
+
+#         # construct search quantities from base and domain quantities
+#         self.quantities = dict(**Domain.base_quantities)
+#         for quantity_name, quantity in self.quantities.items():
+#             quantity.name = quantity_name
+#         self.quantities.update(self.domain_quantities)
+
+#         assert any(quantity.order_default for quantity in Domain.instances[name].quantities.values()), \
+#             'you need to define an order default quantity'
+
+#         # construct metrics from base and domain metrics
+#         self.metrics = dict(**Domain.base_metrics)
+#         self.metrics.update(**domain_metrics)
+#         self.groups = dict(**Domain.base_groups)
+#         self.groups.update(**domain_groups)
+
+#     @property
+#     def metrics_names(self) -> Iterable[str]:
+#         ''' Just the names of all metrics. '''
+#         return list(self.metrics.keys())
+
+#     @property
+#     def aggregations(self) -> Dict[str, int]:
+#         '''
+#         The search aggregations and the default maximum number of calculated buckets. See also
+#         :func:`nomad.search.aggregations`.
+#         '''
+#         return {
+#             quantity.name: quantity.aggregations
+#             for quantity in self.quantities.values()
+#             if quantity.aggregations > 0
+#         }
+
+#     @property
+#     def aggregations_names(self) -> Iterable[str]:
+#         ''' Just the names of all aggregations. '''
+#         return list(self.aggregations.keys())
+
+#     @property
+#     def order_default_quantity(self) -> str:
+#         for quantity in self.quantities.values():
+#             if quantity.order_default:
+#                 return quantity.qualified_name
+
+#         assert False, 'each domain must define an order_default quantity'
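+
+# A minimal sketch of the replacement (using the annotations introduced
+# elsewhere in this patch): the DomainQuantity/Domain machinery above is
+# superseded by SearchQuantity annotations attached directly to metainfo
+# quantities, e.g.:
+#
+#     from nomad.metainfo import MSection, Quantity
+#     from nomad.metainfo.search import SearchQuantity
+#
+#     class DFTMetadata(MSection):
+#         system = Quantity(
+#             type=str,
+#             description='The system type of the simulated system.',
+#             a_search=SearchQuantity(default_statistic=True))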
 
 
 def get_optional_backend_value(backend, key, section, unavailable_value=None, logger=None):
diff --git a/nomad/datamodel/dft.py b/nomad/datamodel/dft.py
index f9cdb22341..49efdfcd81 100644
--- a/nomad/datamodel/dft.py
+++ b/nomad/datamodel/dft.py
@@ -12,21 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 DFT specific metadata
-"""
+'''
 
-from typing import List
 import re
-from elasticsearch_dsl import Integer, Object, InnerDoc, Keyword
 
 from nomadcore.local_backend import ParserEvent
 
 from nomad import utils, config
-from nomad.metainfo import optimade, MSection, Section, Quantity, MEnum
-from nomad.metainfo.elastic import elastic_mapping, elastic_obj
+from nomad.metainfo import optimade, MSection, Section, Quantity, MEnum, SubSection
+from nomad.metainfo.search import SearchQuantity
 
-from .base import CalcWithMetadata, DomainQuantity, Domain, get_optional_backend_value
+from .base import get_optional_backend_value
 
 
 xc_treatments = {
@@ -38,7 +36,7 @@ xc_treatments = {
     'vdw': 'vdW',
     'lda': 'LDA',
 }
-""" https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-meta-info/wikis/metainfo/XC-functional """
+''' https://gitlab.mpcdf.mpg.de/nomad-lab/nomad-meta-info/wikis/metainfo/XC-functional '''
 
 basis_sets = {
     'gaussians': 'gaussians',
@@ -70,81 +68,130 @@ def simplify_version(version):
 
 
 class Label(MSection):
-    """
+    '''
     Label that further classifies a structure.
 
     Attributes:
         label: The label as a string
         type: The type of the label
         source: The source that this label was taken from.
-    """
 
-    m_def = Section(a_elastic=dict(type=InnerDoc))
-
-    label = Quantity(type=str, a_elastic=dict(type=Keyword))
+    '''
+    label = Quantity(type=str, a_search=SearchQuantity())
 
     type = Quantity(type=MEnum(
         'compound_class', 'classification', 'prototype', 'prototype_id'),
-        a_elastic=dict(type=Keyword))
+        a_search=SearchQuantity())
 
     source = Quantity(
         type=MEnum('springer', 'aflow_prototype_library'),
-        a_elastic=dict(type=Keyword))
-
-
-ESLabel = elastic_mapping(Label.m_def, InnerDoc)
-
-
-class DFTCalcWithMetadata(CalcWithMetadata):
-
-    def __init__(self, **kwargs):
-        self.basis_set: str = None
-        self.xc_functional: str = None
-        self.system: str = None
-        self.crystal_system: str = None
-        self.spacegroup: str = None
-        self.spacegroup_symbol: str = None
-        self.code_name: str = None
-        self.code_version: str = None
-
-        self.n_geometries = 0
-        self.n_calculations = 0
-        self.n_total_energies = 0
-        self.n_quantities = 0
-        self.quantities = []
-        self.geometries = []
-        self.group_hash: str = None
-
-        self.labels: List[Label] = []
-        self.optimade: optimade.OptimadeEntry = None
-
-        super().__init__(**kwargs)
-
-    def update(self, **kwargs):
-        super().update(**kwargs)
-
-        if len(self.labels) > 0:
-            self.labels = [Label.m_from_dict(label) for label in self.labels]
-
-        if self.optimade is not None and isinstance(self.optimade, dict):
-            self.optimade = optimade.OptimadeEntry.m_from_dict(self.optimade)
-
-    def __getitem__(self, key):
-        value = super().__getitem__(key)
-
-        if key == 'labels':
-            return [item.m_to_dict() for item in value]
-
-        if key == 'optimade':
-            return value.m_to_dict()
-
-        return value
+        a_search=SearchQuantity())
+
+
+class DFTMetadata(MSection):
+    m_def = Section(a_domain='dft')
+
+    basis_set = Quantity(
+        type=str, default='not processed',
+        description='The used basis set functions.',
+        a_search=SearchQuantity(statistic_size=20, default_statistic=True))
+
+    xc_functional = Quantity(
+        type=str, default='not processed',
+        description='The libXC based xc functional classification used in the simulation.',
+        a_search=SearchQuantity(statistic_size=20, default_statistic=True))
+
+    system = Quantity(
+        type=str, default='not processed',
+        description='The system type of the simulated system.',
+        a_search=SearchQuantity(default_statistic=True))
+
+    crystal_system = Quantity(
+        type=str, default='not processed',
+        description='The crystal system type of the simulated system.',
+        a_search=SearchQuantity(default_statistic=True))
+
+    spacegroup = Quantity(
+        type=int, default='not processed',
+        description='The spacegroup of the simulated system as number.',
+        a_search=SearchQuantity())
+
+    spacegroup_symbol = Quantity(
+        type=str, default='not processed',
+        description='The spacegroup as international short symbol.',
+        a_search=SearchQuantity())
+
+    code_name = Quantity(
+        type=str, default='not processed',
+        description='The name of the used code.',
+        a_search=SearchQuantity(statistic_size=40, default_statistic=True))
+
+    code_version = Quantity(
+        type=str, default='not processed',
+        description='The version of the used code.',
+        a_search=SearchQuantity())
+
+    n_geometries = Quantity(
+        type=int, description='Number of unique geometries.',
+        a_search=SearchQuantity(metric_name='geometries', metric='sum'))
+
+    n_calculations = Quantity(
+        type=int,
+        description='Number of single configuration calculation sections',
+        a_search=SearchQuantity(metric_name='calculations', metric='sum'))
+
+    n_total_energies = Quantity(
+        type=int, description='Number of total energy calculations',
+        a_search=SearchQuantity(metric_name='total_energies', metric='sum'))
+
+    n_quantities = Quantity(
+        type=int, description='Number of metainfo quantities parsed from the entry.',
+        a_search=SearchQuantity(metric='sum', metric_name='quantities'))
+
+    quantities = Quantity(
+        type=str, shape=['0..*'],
+        description='All quantities that are used by this entry.',
+        a_search=SearchQuantity(
+            metric_name='distinct_quantities', metric='cardinality', many_and='append'))
+
+    geometries = Quantity(
+        type=str, shape=['0..*'],
+        description='Hashes for each simulated geometry',
+        a_search=SearchQuantity(metric_name='unique_geometries', metric='cardinality'))
+
+    group_hash = Quantity(
+        type=str,
+        description='A hash from key metadata used to group similar entries.',
+        a_search=SearchQuantity(many_or='append', group='groups', metric_name='groups', metric='cardinality'))
+
+    labels = SubSection(
+        sub_section=Label, repeats=True,
+        description='The labels taken from AFLOW prototypes and springer.',
+        a_search='labels')
+
+    optimade = SubSection(
+        sub_section=optimade.OptimadeEntry,
+        description='Metadata used for the optimade API.',
+        a_search='optimade')
+
+    def m_update(self, **kwargs):
+        # TODO necessary?
+        if 'labels' in kwargs:
+            self.labels = [Label.m_from_dict(label) for label in kwargs.pop('labels')]
+
+        if 'optimade' in kwargs:
+            self.optimade = optimade.OptimadeEntry.m_from_dict(kwargs.pop('optimade'))
+
+        super().m_update(**kwargs)
 
     def apply_domain_metadata(self, backend):
         from nomad.normalizing.system import normalized_atom_labels
+        entry = self.m_parent
 
         logger = utils.get_logger(__name__).bind(
-            upload_id=self.upload_id, calc_id=self.calc_id, mainfile=self.mainfile)
+            upload_id=entry.upload_id, calc_id=entry.calc_id, mainfile=entry.mainfile)
 
         # code and code specific ids
         self.code_name = backend.get_value('program_name', 0)
@@ -153,44 +200,44 @@ class DFTCalcWithMetadata(CalcWithMetadata):
         except KeyError:
             self.code_version = config.services.unavailable_value
 
-        self.raw_id = get_optional_backend_value(backend, 'raw_id', 'section_run', 0)
+        raw_id = get_optional_backend_value(backend, 'raw_id', 'section_run', None)
+        if raw_id is not None:
+            entry.raw_id = raw_id
 
         # metadata (system, method, chemistry)
-        self.atoms = get_optional_backend_value(backend, 'atom_labels', 'section_system', [], logger=logger)
-        if hasattr(self.atoms, 'tolist'):
-            self.atoms = self.atoms.tolist()
-        self.n_atoms = len(self.atoms)
-        self.atoms = list(set(normalized_atom_labels(set(self.atoms))))
-        self.atoms.sort()
+        atoms = get_optional_backend_value(backend, 'atom_labels', 'section_system', [], logger=logger)
+        if hasattr(atoms, 'tolist'):
+            atoms = atoms.tolist()
+        entry.n_atoms = len(atoms)
+        atoms = list(set(normalized_atom_labels(set(atoms))))
+        atoms.sort()
+        entry.atoms = atoms
 
         self.crystal_system = get_optional_backend_value(
             backend, 'crystal_system', 'section_symmetry', logger=logger)
         self.spacegroup = get_optional_backend_value(
             backend, 'space_group_number', 'section_symmetry', 0, logger=logger)
         self.spacegroup_symbol = get_optional_backend_value(
-            backend, 'international_short_symbol', 'section_symmetry', 0, logger=logger)
+            backend, 'international_short_symbol', 'section_symmetry', logger=logger)
         self.basis_set = map_basis_set_to_basis_set_label(
             get_optional_backend_value(backend, 'program_basis_set_type', 'section_run', logger=logger))
         self.system = get_optional_backend_value(
             backend, 'system_type', 'section_system', logger=logger)
-        self.formula = get_optional_backend_value(
+        entry.formula = get_optional_backend_value(
             backend, 'chemical_composition_bulk_reduced', 'section_system', logger=logger)
         self.xc_functional = map_functional_name_to_xc_treatment(
             get_optional_backend_value(backend, 'XC_functional_name', 'section_method', logger=logger))
 
         # grouping
         self.group_hash = utils.hash(
-            self.formula,
+            entry.formula,
             self.spacegroup,
             self.basis_set,
             self.xc_functional,
             self.code_name,
             self.code_version,
-            self.with_embargo,
-            self.comment,
-            self.references,
-            self.uploader,
-            self.coauthors)
+            entry.with_embargo,
+            entry.uploader)
 
         # metrics and quantities
         quantities = set()
@@ -247,69 +294,3 @@ class DFTCalcWithMetadata(CalcWithMetadata):
 
         # optimade
         self.optimade = backend.get_mi2_section(optimade.OptimadeEntry.m_def)
-
-
-def _elastic_label_value(label):
-    if isinstance(label, str):
-        return label
-    else:
-        return elastic_obj(label, ESLabel)
-
-
-Domain(
-    'dft', DFTCalcWithMetadata,
-    quantities=dict(
-        basis_set=DomainQuantity(
-            'The used basis set functions.', aggregations=20),
-        xc_functional=DomainQuantity(
-            'The xc functional type used for the simulation.', aggregations=20),
-        system=DomainQuantity(
-            'The system type of the simulated system.', aggregations=10),
-        crystal_system=DomainQuantity(
-            'The crystal system type of the simulated system.', aggregations=10),
-        code_name=DomainQuantity(
-            'The code name.', aggregations=40),
-        spacegroup=DomainQuantity('The spacegroup of the simulated system as number'),
-        spacegroup_symbol=DomainQuantity('The spacegroup as international short symbol'),
-        geometries=DomainQuantity(
-            'Hashes that describe unique geometries simulated by this code run.', multi=True),
-        group_hash=DomainQuantity(
-            'A hash from key metadata used to group similar entries.'),
-        quantities=DomainQuantity(
-            'All quantities that are used by this calculation',
-            metric=('quantities', 'value_count'), multi=True),
-        n_total_energies=DomainQuantity(
-            'Number of total energy calculations',
-            elastic_mapping=Integer()),
-        n_calculations=DomainQuantity(
-            'Number of single configuration calculation sections',
-            elastic_mapping=Integer()),
-        n_quantities=DomainQuantity(
-            'Number of overall parsed quantities',
-            elastic_mapping=Integer()),
-        n_geometries=DomainQuantity(
-            'Number of unique geometries',
-            elastic_mapping=Integer()),
-        labels=DomainQuantity(
-            'Search based for springer classification and aflow prototypes',
-            elastic_field='labels.label',
-            elastic_mapping=Object(ESLabel),
-            elastic_value=lambda labels: [_elastic_label_value(label) for label in labels],
-            multi=True),
-        optimade=DomainQuantity(
-            'Search based on optimade\'s filter query language',
-            elastic_mapping=Object(optimade.ESOptimadeEntry),
-            elastic_value=lambda entry: elastic_obj(entry, optimade.ESOptimadeEntry)
-        )),
-    metrics=dict(
-        total_energies=('n_total_energies', 'sum'),
-        calculations=('n_calculations', 'sum'),
-        quantities=('n_quantities', 'sum'),
-        geometries=('n_geometries', 'sum'),
-        unique_geometries=('geometries', 'cardinality'),
-        groups=('group_hash', 'cardinality')
-    ),
-    groups=dict(
-        groups=('group_hash', 'groups')),
-    default_statistics=[
-        'atoms', 'dft.basis_set', 'dft.xc_functional', 'dft.system', 'dft.crystal_system', 'dft.code_name'])
diff --git a/nomad/datamodel/ems.py b/nomad/datamodel/ems.py
index 14277f4a9f..ff6a983567 100644
--- a/nomad/datamodel/ems.py
+++ b/nomad/datamodel/ems.py
@@ -12,55 +12,60 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 Experimental material science specific metadata
-"""
+'''
 
 from nomad import utils
+from nomad.metainfo import Quantity, MSection, Section, Datetime
+from nomad.metainfo.search import SearchQuantity
 
-from .base import CalcWithMetadata, DomainQuantity, Domain, get_optional_backend_value
+from .base import get_optional_backend_value
 
 
-class EMSEntryWithMetadata(CalcWithMetadata):
+class EMSMetadata(MSection):
+    m_def = Section(a_domain='ems')
 
-    def __init__(self, **kwargs):
-        # sample quantities
-        self.chemical: str = None
-        self.sample_constituents: str = None
-        self.sample_microstructure: str = None
+    # sample quantities
+    chemical = Quantity(type=str, default='not processed', a_search=SearchQuantity())
+    sample_constituents = Quantity(type=str, default='not processed', a_search=SearchQuantity(default_statistic=True))
+    sample_microstructure = Quantity(type=str, default='not processed', a_search=SearchQuantity(default_statistic=True))
 
-        # general metadata
-        self.experiment_summary: str = None
-        self.experiment_location: str = None
-        self.experiment_time: str = None
+    # general metadata
+    experiment_summary = Quantity(type=str, default='not processed', a_search=SearchQuantity())
+    experiment_location = Quantity(type=str, default='not processed', a_search=SearchQuantity())
+    experiment_time = Quantity(type=Datetime, default='not processed', a_search=SearchQuantity())
 
-        # method
-        self.method: str = None
-        self.probing_method: str = None
+    # method
+    method = Quantity(type=str, default='not processed', a_search=SearchQuantity(default_statistic=True))
+    probing_method = Quantity(type=str, default='not processed', a_search=SearchQuantity(default_statistic=True))
 
-        # data metadata
-        self.repository_name: str = None
-        self.repository_url: str = None
-        self.preview_url: str = None
+    # data metadata
+    repository_name = Quantity(type=str, default='not processed', a_search=SearchQuantity())
+    repository_url = Quantity(type=str, default='not processed', a_search=SearchQuantity())
+    preview_url = Quantity(type=str, default='not processed', a_search=SearchQuantity())
 
-        self.quantities = []
-        self.group_hash: str = None
-
-        super().__init__(**kwargs)
+    # TODO move
+    quantities = Quantity(type=str, shape=['0..*'], default=[], a_search=SearchQuantity())
+    group_hash = Quantity(type=str, a_search=SearchQuantity())
 
     def apply_domain_metadata(self, backend):
+        entry = self.m_parent
         logger = utils.get_logger(__name__).bind(
-            upload_id=self.upload_id, calc_id=self.calc_id, mainfile=self.mainfile)
+            upload_id=entry.upload_id, calc_id=entry.calc_id, mainfile=entry.mainfile)
 
-        self.formula = get_optional_backend_value(
+        entry.formula = get_optional_backend_value(
             backend, 'sample_chemical_formula', 'section_sample', logger=logger)
-        self.atoms = get_optional_backend_value(
+        atoms = get_optional_backend_value(
             backend, 'sample_atom_labels', 'section_sample', logger=logger)
-        if hasattr(self.atoms, 'tolist'):
-            self.atoms = self.atoms.tolist()
-        self.n_atoms = len(self.atoms)
-        self.atoms = list(set(self.atoms))
-        self.atoms.sort()
+        if hasattr(atoms, 'tolist'):
+            atoms = atoms.tolist()
+        entry.n_atoms = len(atoms)
+
+        atoms = list(set(atoms))
+        atoms.sort()
+        entry.atoms = atoms
+
         self.chemical = get_optional_backend_value(
             backend, 'sample_chemical_name', 'section_sample', logger=logger)
         self.sample_microstructure = get_optional_backend_value(
@@ -88,14 +93,11 @@ class EMSEntryWithMetadata(CalcWithMetadata):
             backend, 'data_preview_url', 'section_data', logger=logger)
 
         self.group_hash = utils.hash(
-            self.formula,
+            entry.formula,
             self.method,
             self.experiment_location,
-            self.with_embargo,
-            self.comment,
-            self.references,
-            self.uploader,
-            self.coauthors)
+            entry.with_embargo,
+            entry.uploader)
 
         quantities = set()
 
@@ -103,26 +105,3 @@ class EMSEntryWithMetadata(CalcWithMetadata):
             quantities.add(meta_info)
 
         self.quantities = list(quantities)
-
-
-Domain(
-    'ems', EMSEntryWithMetadata,
-    root_sections=['section_experiment', 'section_entry_info'],
-    metainfo_all_package='all.experimental.nomadmetainfo.json',
-    quantities=dict(
-        method=DomainQuantity(
-            'The experimental method used.', aggregations=20),
-        probing_method=DomainQuantity(
-            'The used probing method.', aggregations=10),
-        sample_microstructure=DomainQuantity(
-            'The sample micro structure.', aggregations=10),
-        sample_constituents=DomainQuantity(
-            'The sample constituents.', aggregations=10),
-        quantities=DomainQuantity(
-            'All quantities that are used by this calculation')),
-    metrics=dict(
-        quantities=('quantities', 'value_count')),
-    groups=dict(),
-    default_statistics=[
-        'atoms', 'ems.method', 'ems.probing_method', 'ems.sample_microstructure',
-        'ems.sample_constituents'])
diff --git a/nomad/datamodel/metainfo.py b/nomad/datamodel/metainfo.py
index ff6eaebd25..07212222e0 100644
--- a/nomad/datamodel/metainfo.py
+++ b/nomad/datamodel/metainfo.py
@@ -12,20 +12,36 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 This duplicates functionality from .base.py. It represents the first pieces of a transition
 towards using the new metainfo system for all repository metadata.
-"""
-from typing import Dict
+'''
+from typing import Dict, Any
 from cachetools import cached, TTLCache
-from elasticsearch_dsl import Keyword
+from elasticsearch_dsl import Keyword, Text, analyzer, tokenizer
+import ase.data
 
 from nomad import metainfo, config
+from nomad.metainfo.search import SearchQuantity
 import nomad.metainfo.mongoengine
 
+from .dft import DFTMetadata
+from .ems import EMSMetadata
+
+
+def _only_atoms(atoms):
+    numbers = [ase.data.atomic_numbers[atom] for atom in atoms]
+    only_atoms = [ase.data.chemical_symbols[number] for number in sorted(numbers)]
+    return ''.join(only_atoms)
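+# For example, _only_atoms(['O', 'H', 'H']) sorts by atomic number and yields 'HHO'.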
+
+
+path_analyzer = analyzer(
+    'path_analyzer',
+    tokenizer=tokenizer('path_tokenizer', 'pattern', pattern='/'))
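+# The pattern tokenizer splits on '/'; a (hypothetical) path like
+# 'upload/dir/mainfile.json' is indexed as 'upload', 'dir', 'mainfile.json'.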
+
 
 class User(metainfo.MSection):
-    """ A NOMAD user.
+    ''' A NOMAD user.
 
     Typically a NOMAD user has a NOMAD account. The user related data is managed by
     NOMAD keycloak user-management system. Users are used to denote uploaders, authors,
@@ -41,16 +57,26 @@ class User(metainfo.MSection):
         create: The time the account was created
         repo_user_id: The id that was used to identify this user in the NOMAD CoE Repository
         is_admin: Bool that indicates if the user is the admin user
-    """
+    '''
+
+    user_id = metainfo.Quantity(
+        type=str,
+        a_me=dict(primary_key=True),
+        a_search=SearchQuantity())
 
-    user_id = metainfo.Quantity(type=str, a_me=dict(primary_key=True))
     name = metainfo.Quantity(
         type=str,
-        derived=lambda user: ('%s %s' % (user.first_name, user.last_name)).strip())
+        derived=lambda user: ('%s %s' % (user.first_name, user.last_name)).strip(),
+        a_search=SearchQuantity(es_mapping=Text(fields={'keyword': Keyword()})))
+
     first_name = metainfo.Quantity(type=str)
     last_name = metainfo.Quantity(type=str)
     email = metainfo.Quantity(
-        type=str, a_me=dict(index=True), a_elastic=dict(mapping=Keyword))
+        type=str,
+        a_me=dict(index=True),
+        a_elastic=dict(mapping=Keyword),  # TODO remove?
+        a_search=SearchQuantity())
+
     username = metainfo.Quantity(type=str)
     affiliation = metainfo.Quantity(type=str)
     affiliation_address = metainfo.Quantity(type=str)
@@ -76,8 +102,33 @@ class User(metainfo.MSection):
         }
 
 
+class UserReference(metainfo.Reference):
+    '''
+    Special metainfo reference type that allows to use user_ids as values. It automatically
+    resolves user_ids to User objects. This is done lazily on getting the value.
+    '''
+
+    def __init__(self):
+        super().__init__(User.m_def)
+
+    def set_normalize(self, section: metainfo.MSection, quantity_def: metainfo.Quantity, value: Any) -> Any:
+        if isinstance(value, str):
+            return metainfo.MProxy(value)
+        else:
+            return super().set_normalize(section, quantity_def, value)
+
+    def resolve(self, section: metainfo.MSection, quantity_def: metainfo.Quantity, value: Any) -> metainfo.MSection:
+        return User.get(user_id=value.url)
+
+    def serialize(self, section: metainfo.MSection, quantity_def: metainfo.Quantity, value: Any) -> Any:
+        return value.user_id
+
+
+user_reference = UserReference()
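+# Usage sketch (hypothetical id): a user_id string is stored as an MProxy and
+# only resolved to a User when the value is read:
+#     entry.uploader = 'some-user-id'  # stored as MProxy
+#     entry.uploader.name              # resolves via User.get(user_id=...)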
+
+
 class Dataset(metainfo.MSection):
-    """ A Dataset is attached to one or many entries to form a set of data.
+    ''' A Dataset is attached to one or many entries to form a set of data.
 
     Args:
         dataset_id: The unique identifier for this dataset as a string. It should be
@@ -94,31 +145,96 @@ class Dataset(metainfo.MSection):
         pid: The original NOMAD CoE Repository dataset PID. Old DOIs still reference
             datasets based on this id. Is not used for new datasets.
         created: The date when the dataset was first created.
-    """
+    '''
     dataset_id = metainfo.Quantity(
         type=str,
-        a_me=dict(primary_key=True))
+        a_me=dict(primary_key=True),
+        a_search=SearchQuantity())
     name = metainfo.Quantity(
         type=str,
-        a_me=dict(index=True))
+        a_me=dict(index=True),
+        a_search=SearchQuantity())
     user_id = metainfo.Quantity(
         type=str,
         a_me=dict(index=True))
     doi = metainfo.Quantity(
         type=str,
-        a_me=dict(index=True))
+        a_me=dict(index=True),
+        a_search=SearchQuantity())
     pid = metainfo.Quantity(
         type=str,
         a_me=dict(index=True))
     created = metainfo.Quantity(
         type=metainfo.Datetime,
-        a_me=dict(index=True))
+        a_me=dict(index=True),
+        a_search=SearchQuantity())
 
 
-class UserMetadata(metainfo.MSection):
-    """ NOMAD entry quantities that are given by the user or determined by user actions.
+class DatasetReference(metainfo.Reference):
+    '''
+    Special metainfo reference type that allows using dataset_ids as values. It automatically
+    resolves dataset_ids to Dataset objects. This is done lazily on getting the value.
+    '''
+
+    def __init__(self):
+        super().__init__(Dataset.m_def)
+
+    def set_normalize(self, section: metainfo.MSection, quantity_def: metainfo.Quantity, value: Any) -> Any:
+        if isinstance(value, str):
+            return metainfo.MProxy(value)
+        else:
+            return super().set_normalize(section, quantity_def, value)
+
+    def resolve(self, section: metainfo.MSection, quantity_def: metainfo.Quantity, value: Any) -> metainfo.MSection:
+        return Dataset.m_def.m_x('me').get(dataset_id=value.url)
+
+    def serialize(self, section: metainfo.MSection, quantity_def: metainfo.Quantity, value: Any) -> Any:
+        if isinstance(value, metainfo.MProxy):
+            return value.url
+        else:
+            return value.dataset_id
+
+    def deserialize(self, section: metainfo.MSection, quantity_def: metainfo.Quantity, value: Any) -> Any:
+        return metainfo.MProxy(value)
+
+
+dataset_reference = DatasetReference()
+
+
+class EditableUserMetadata(metainfo.MCategory):
+    ''' NOMAD entry quantities that can be edited by the user after publish. '''
+
+
+class UserMetadata(metainfo.MCategory):
+    ''' NOMAD entry quantities that are given by the user or determined by user actions. '''
+    pass
+
+
+class DomainMetadata(metainfo.MCategory):
+    ''' NOMAD entry quantities that are determined by the uploaded data. '''
+    pass
+
+
+class EntryMetadata(metainfo.MSection):
+    '''
+    Attributes:
+        upload_id: The ``upload_id`` of the calculations upload (random UUID).
+        calc_id: The unique mainfile based calculation id.
+        calc_hash: The raw file content based checksum/hash of this calculation.
+        pid: The unique persistent id of this calculation.
+        mainfile: The upload relative mainfile path.
+        domain: Must be the key for a registered domain. This determines which actual
+            subclass is instantiated.
+
+        files: A list of all files, relative to upload.
+        upload_time: The time when the calc was uploaded.
+        uploader: An object describing the uploading user, has at least ``user_id``
+        processed: Boolean indicating if this calc was successfully processed and archive
+            data and calc metadata are available.
+        last_processing: A datetime with the time of the last successful processing.
+        nomad_version: A string that describes the version of the nomad software that was
+            used to do the last successful processing.
 
-    Args:
         comment: An arbitrary string with user provided information about the entry.
         references: A list of URLs for resources that are related to the entry.
         uploader: Id of the uploader of this entry.
@@ -131,16 +247,217 @@ class UserMetadata(metainfo.MSection):
             user, and users the entry is shared with (see shared_with).
         upload_time: The time that this entry was uploaded
         datasets: Ids of all datasets that this entry appears in
-    """
-
-    comment = metainfo.Quantity(type=str)
-    references = metainfo.Quantity(type=str, shape=['0..*'])
-    uploader = metainfo.Quantity(type=str, a_flask=dict(admin_only=True, verify=User))
-    coauthors = metainfo.Quantity(type=str, shape=['0..*'], a_flask=dict(verify=User))
-    shared_with = metainfo.Quantity(type=str, shape=['0..*'], a_flask=dict(verify=User))
-    with_embargo = metainfo.Quantity(type=bool)
-    upload_time = metainfo.Quantity(type=metainfo.Datetime, a_flask=dict(admin_only=True))
-    datasets = metainfo.Quantity(type=str, shape=['0..*'], a_flask=dict(verify=Dataset))
+    '''
+    upload_id = metainfo.Quantity(
+        type=str,
+        description='A random UUID that uniquely identifies the upload of the entry.',
+        a_search=SearchQuantity(
+            many_or='append', group='uploads', metric_name='uploads', metric='cardinality'))
+
+    calc_id = metainfo.Quantity(
+        type=str,
+        description='A unique ID based on the upload id and entry\'s mainfile.',
+        a_search=SearchQuantity(many_or='append'))
+
+    calc_hash = metainfo.Quantity(
+        type=str,
+        description='A raw file content based checksum/hash.',
+        a_search=SearchQuantity(
+            many_or='append', metric_name='unique_entries', metric='cardinality'))
+
+    mainfile = metainfo.Quantity(
+        type=str,
+        description='The upload relative mainfile path.',
+        a_search=[
+            SearchQuantity(
+                description='Search within the mainfile path.',
+                es_mapping=Text(multi=True, analyzer=path_analyzer, fields={'keyword': Keyword()}),
+                many_or='append', es_quantity='mainfile.keyword'),
+            SearchQuantity(
+                description='Search for the exact mainfile.',
+                many_and='append', name='mainfile_path', es_quantity='mainfile.keyword')])
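+    # Note: a list value for a_search defines multiple search quantities for a
+    # single metainfo quantity; here a tokenized path search ('mainfile') and
+    # an exact match ('mainfile_path') share the same underlying field.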
+
+    files = metainfo.Quantity(
+        type=str, shape=['0..*'],
+        description='The entry\'s raw file paths relative to its upload.',
+        a_search=[
+            SearchQuantity(
+                description='Search within the paths.', name='path',
+                es_mapping=Text(
+                    multi=True, analyzer=path_analyzer, fields={'keyword': Keyword()})
+            ),
+            SearchQuantity(
+                description='Search for exact paths.',
+                many_or='append', name='files', es_quantity='files.keyword')])
+
+    pid = metainfo.Quantity(
+        type=int,
+        description='The unique, sequentially enumerated, integer persistent identifier',
+        a_search=SearchQuantity(many_or='append'))
+
+    raw_id = metainfo.Quantity(
+        type=str,
+        description='A raw format specific id that was acquired from the files of this entry',
+        a_search=SearchQuantity(many_or='append'))
+
+    domain = metainfo.Quantity(
+        type=metainfo.MEnum('dft', 'ems'),
+        description='The material science domain',
+        a_search=SearchQuantity())
+
+    published = metainfo.Quantity(
+        type=bool, default=False,
+        description='Indicates if the entry is published',
+        a_search=SearchQuantity())
+
+    processed = metainfo.Quantity(
+        type=bool, default=False,
+        description='Indicates that the entry is successfully processed.',
+        a_search=SearchQuantity())
+
+    last_processing = metainfo.Quantity(
+        type=metainfo.Datetime,
+        description='The datetime of the last attempted processing.')
+
+    nomad_version = metainfo.Quantity(
+        type=str,
+        description='The NOMAD version used for the last processing attempt.',
+        a_search=SearchQuantity(many_or='append'))
+    nomad_commit = metainfo.Quantity(
+        type=str,
+        description='The NOMAD commit used for the last processing attempt.',
+        a_search=SearchQuantity(many_or='append'))
+    parser_name = metainfo.Quantity(
+        type=str,
+        description='The NOMAD parser used for the last processing attempt.',
+        a_search=SearchQuantity(many_or='append'))
+
+    comment = metainfo.Quantity(
+        type=str, categories=[UserMetadata, EditableUserMetadata],
+        description='A user provided comment.',
+        a_search=SearchQuantity(es_mapping=Text()))
+
+    references = metainfo.Quantity(
+        type=str, shape=['0..*'], categories=[UserMetadata, EditableUserMetadata],
+        description='User provided references (URLs).',
+        a_search=SearchQuantity())
+
+    uploader = metainfo.Quantity(
+        type=user_reference, categories=[UserMetadata],
+        description='The uploader of the entry',
+        a_flask=dict(admin_only=True, verify=User),
+        a_search=[
+            SearchQuantity(
+                description='Search uploader with exact names.',
+                metric_name='uploaders', metric='cardinality',
+                many_or='append', es_quantity='uploader.name.keyword'),
+            SearchQuantity(
+                name='uploader_id', es_quantity='uploader.user_id')
+        ])
+
+    coauthors = metainfo.Quantity(
+        type=user_reference, shape=['0..*'], default=[], categories=[UserMetadata, EditableUserMetadata],
+        description='A user provided list of co-authors.',
+        a_flask=dict(verify=User))
+
+    authors = metainfo.Quantity(
+        type=user_reference, shape=['0..*'],
+        description='All authors (uploader and co-authors).',
+        derived=lambda entry: ([entry.uploader] if entry.uploader is not None else []) + entry.coauthors,
+        a_search=SearchQuantity(
+            description='Search authors with exact names.',
+            metric='cardinality',
+            many_or='append', es_quantity='authors.name.keyword', statistic_size=1000))
+
+    shared_with = metainfo.Quantity(
+        type=user_reference, shape=['0..*'], default=[], categories=[UserMetadata, EditableUserMetadata],
+        description='A user provided list of users to share the entry with.',
+        a_flask=dict(verify=User))
+
+    owners = metainfo.Quantity(
+        type=user_reference, shape=['0..*'],
+        description='All owners (uploader and users the entry is shared with).',
+        derived=lambda entry: ([entry.uploader] if entry.uploader is not None else []) + entry.shared_with,
+        a_search=SearchQuantity(
+            description='Search owner with exact names.',
+            many_or='append', es_quantity='owners.name.keyword'))
+
+    with_embargo = metainfo.Quantity(
+        type=bool, default=False, categories=[UserMetadata, EditableUserMetadata],
+        description='Indicates if this entry is under an embargo',
+        a_search=SearchQuantity())
+
+    upload_time = metainfo.Quantity(
+        type=metainfo.Datetime, categories=[UserMetadata],
+        description='The datetime this entry was uploaded to nomad',
+        a_flask=dict(admin_only=True),
+        a_search=SearchQuantity(order_default=True))
+
+    upload_name = metainfo.Quantity(
+        type=str, categories=[UserMetadata],
+        description='The user provided upload name',
+        a_search=SearchQuantity(many_or='append'))
+
+    datasets = metainfo.Quantity(
+        type=dataset_reference, shape=['0..*'], default=[],
+        categories=[UserMetadata, EditableUserMetadata],
+        description='A list of user curated datasets this entry belongs to.',
+        a_flask=dict(verify=Dataset),
+        a_search=[
+            SearchQuantity(
+                es_quantity='datasets.name', many_or='append',
+                description='Search for a particular dataset by exact name.'),
+            SearchQuantity(
+                name='dataset_id', es_quantity='datasets.dataset_id', many_or='append',
+                group='datasets',
+                metric='cardinality', metric_name='datasets',
+                description='Search for a particular dataset by its id.')])
+
+    external_id = metainfo.Quantity(
+        type=str, categories=[UserMetadata],
+        description='A user provided external id.',
+        a_search=SearchQuantity(many_or='split'))
+
+    last_edit = metainfo.Quantity(
+        type=metainfo.Datetime, categories=[UserMetadata],
+        description='The datetime the user metadata was edited last.',
+        a_search=SearchQuantity())
+
+    formula = metainfo.Quantity(
+        type=str, categories=[DomainMetadata],
+        description='A (reduced) chemical formula.',
+        a_search=SearchQuantity())
+
+    atoms = metainfo.Quantity(
+        type=str, shape=['n_atoms'], default=[], categories=[DomainMetadata],
+        description='The atom labels of all atoms of the entry\'s material.',
+        a_search=SearchQuantity(
+            many_and='append', default_statistic=True, statistic_size=len(ase.data.chemical_symbols)))
+
+    only_atoms = metainfo.Quantity(
+        type=str, categories=[DomainMetadata],
+        description='The atom labels concatenated in atomic number order.',
+        derived=lambda entry: _only_atoms(entry.atoms),
+        a_search=SearchQuantity(many_and='append', derived=_only_atoms))
+
+    n_atoms = metainfo.Quantity(
+        type=int, categories=[DomainMetadata],
+        description='The number of atoms in the entry\'s material',
+        a_search=SearchQuantity())
+
+    ems = metainfo.SubSection(sub_section=EMSMetadata, a_search='ems')
+    dft = metainfo.SubSection(sub_section=DFTMetadata, a_search='dft')
+
+    def apply_user_metadata(self, metadata: dict):
+        ''' Applies a user provided metadata dict to this calc. '''
+        self.m_update(**metadata)
+
+    def apply_domain_metadata(self, backend):
+        assert self.domain is not None, 'all entries must have a domain'
+        domain_section_def = self.m_def.all_sub_sections.get(self.domain).sub_section
+        assert domain_section_def is not None, 'unknown domain %s' % self.domain
+        domain_section = self.m_create(domain_section_def.section_cls)
+        domain_section.apply_domain_metadata(backend)
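+
+    # Dispatch sketch (assuming a parsed backend): with domain='dft' this
+    # resolves the 'dft' sub-section, creates a DFTMetadata, and delegates:
+    #     entry = EntryMetadata(domain='dft')
+    #     entry.apply_domain_metadata(backend)
+    #     entry.dft.code_name  # populated from the backend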
 
 
 nomad.metainfo.mongoengine.init_section(User)
diff --git a/nomad/doi.py b/nomad/doi.py
index f05cf45a15..8582a2fd12 100644
--- a/nomad/doi.py
+++ b/nomad/doi.py
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 This module contains all functions necessary to manage DOI via datacite.org and its
 MDS API (https://support.datacite.org/docs/mds-api-guide).
-"""
+'''
 import xml.etree.ElementTree as ET
 import datetime
 import requests
@@ -28,7 +28,7 @@ from nomad import config, utils
 
 
 def edit_url(doi: str, url: str = None):
-    """ Changes the URL of an already findable DOI. """
+    ''' Changes the URL of an already findable DOI. '''
     if url is None:
         url = 'https://repository.nomad-coe.eu/app/gui/datasets/doi/%s' % doi
 
@@ -70,7 +70,7 @@ class DOI(Document):
 
     @staticmethod
     def create(title: str, user: User) -> 'DOI':
-        """ Creates a unique DOI with the NOMAD DOI prefix. """
+        ''' Creates a unique DOI with the NOMAD DOI prefix. '''
         # TODO We use a collection of all DOIs in mongo to ensure uniqueness. We attempt
         # to create new DOIs based on a counter per day until we find a non existing DOI.
         # This might be bad if many DOIs per day are to be expected.
diff --git a/nomad/files.py b/nomad/files.py
index 40a0456a7d..f6918865b6 100644
--- a/nomad/files.py
+++ b/nomad/files.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 Uploads contains classes and functions to create and maintain file structures
 for uploads.
 
@@ -46,7 +46,7 @@ might be published!
 There are multiple ways to solve this. Due to the rarity of the case, we take the
 simplest solution: if one file is public, all files are made public, except those
 being other mainfiles. Therefore, the aux files of a restricted calc might become public!
-"""
+'''
 
 from abc import ABCMeta
 import sys
@@ -60,8 +60,7 @@ import io
 import pickle
 import json
 
-from nomad import config, utils
-from nomad.datamodel import UploadWithMetadata
+from nomad import config, utils, datamodel
 from nomad.archive import write_archive
 
 # TODO this should become obsolete, once we are going beyong python 3.6. For now
@@ -76,21 +75,21 @@ user_metadata_filename = 'user_metadata.pickle'
 
 
 def always_restricted(path: str):
-    """
+    '''
     Used to put general restrictions on files, e.g. due to licensing issues. Will be
     called during packing and while accessing public files.
-    """
+    '''
     basename = os.path.basename(path)
     if basename.startswith('POTCAR') and not basename.endswith('.stripped'):
         return True
 
 
 def copytree(src, dst):
-    """
+    '''
     A clone of ``shutil.copytree`` that does not try to copy the stats on all files.
     This is unnecessary for our use case and also causes permission denied errors for unknown
     reasons.
-    """
+    '''
     os.makedirs(dst, exist_ok=False)
 
     for item in os.listdir(src):
@@ -103,7 +102,7 @@ def copytree(src, dst):
 
 
 class PathObject:
-    """
+    '''
     Object storage-like abstraction for paths in general.
     Arguments:
         bucket: The bucket to store this object in
@@ -111,7 +110,7 @@ class PathObject:
         os_path: Override the "object storage" path with the given path.
         prefix: Add an x-digit prefix directory, e.g. foo/test/ -> foo/tes/test
         create_prefix: Create the prefix right away
-    """
+    '''
     def __init__(
             self, bucket: str, object_id: str, os_path: str = None,
             prefix: bool = False, create_prefix: bool = False) -> None:
@@ -153,7 +152,7 @@ class PathObject:
 
     @property
     def size(self) -> int:
-        """ The os determined file size. """
+        ''' The os determined file size. '''
         return os.stat(self.os_path).st_size
 
     def __repr__(self) -> str:
@@ -161,13 +160,13 @@ class PathObject:
 
 
 class DirectoryObject(PathObject):
-    """
+    '''
     Object storage-like abstraction for directories.
     Arguments:
         bucket: The bucket to store this object in
         object_id: The object id (i.e. directory path)
         create: True if the directory structure should be created. Default is False.
-    """
+    '''
     def __init__(self, bucket: str, object_id: str, create: bool = False, **kwargs) -> None:
         super().__init__(bucket, object_id, **kwargs)
         self._create = create
@@ -234,7 +233,7 @@ class UploadFiles(DirectoryObject, metaclass=ABCMeta):
             pickle.dump(data, f)
 
     def to_staging_upload_files(self, create: bool = False) -> 'StagingUploadFiles':
-        """ Casts to or creates corresponding staging upload files or returns None. """
+        ''' Casts to or creates corresponding staging upload files or returns None. '''
         raise NotImplementedError()
 
     @staticmethod
@@ -247,7 +246,7 @@ class UploadFiles(DirectoryObject, metaclass=ABCMeta):
             return None
 
     def raw_file(self, file_path: str, *args, **kwargs) -> IO:
-        """
+        '''
         Opens a raw file and returns a file-like object. Additional args, kwargs are
         delegated to the respective `open` call.
         Arguments:
@@ -255,38 +254,38 @@ class UploadFiles(DirectoryObject, metaclass=ABCMeta):
         Raises:
             KeyError: If the file does not exist.
             Restricted: If the file is restricted and upload access evaluated to False.
-        """
+        '''
         raise NotImplementedError()
 
     def raw_file_size(self, file_path: str) -> int:
-        """
+        '''
         Returns:
             The size of the given raw file.
-        """
+        '''
         raise NotImplementedError()
 
     def raw_file_manifest(self, path_prefix: str = None) -> Generator[str, None, None]:
-        """
+        '''
     Returns the paths of all raw files in the archive (with a given prefix).
         Arguments:
             path_prefix: An optional prefix; only returns those files that have the prefix.
         Returns:
             An iterable over all (matching) raw files.
-        """
+        '''
         raise NotImplementedError()
 
     def raw_file_list(self, directory: str) -> List[Tuple[str, int]]:
-        """
+        '''
     Gives a list of directory contents and their sizes.
         Arguments:
             directory: The directory to list
         Returns:
             A list of tuples with file name and size.
-        """
+        '''
         raise NotImplementedError()
 
     def archive_file(self, calc_id: str, *args, **kwargs) -> IO:
-        """
+        '''
         Opens an archive file and returns a file-like object. Additional args, kwargs are
         delegated to the respective `open` call.
         Arguments:
@@ -294,18 +293,18 @@ class UploadFiles(DirectoryObject, metaclass=ABCMeta):
         Raises:
             KeyError: If the calc does not exist.
             Restricted: If the file is restricted and upload access evaluated to False.
-        """
+        '''
         raise NotImplementedError()
 
     def archive_file_size(self, calc_id: str) -> int:
-        """
+        '''
         Returns:
             The size of the archive.
-        """
+        '''
         raise NotImplementedError()
 
     def archive_log_file(self, calc_id: str, *args, **kwargs) -> IO:
-        """
+        '''
         Opens an archive log file and returns a file-like object. Additional args, kwargs are
         delegated to the respective `open` call.
         Arguments:
@@ -313,11 +312,11 @@ class UploadFiles(DirectoryObject, metaclass=ABCMeta):
         Raises:
             KeyError: If the calc does not exist.
             Restricted: If the file is restricted and upload access evaluated to False.
-        """
+        '''
         raise NotImplementedError()
 
     def open_zipfile_cache(self):
-        """ Allows to reuse the same zipfile for multiple file operations. Must be closed. """
+        ''' Allows the same zipfile to be reused for multiple file operations. Must be closed. '''
         pass
 
     def close_zipfile_cache(self):
@@ -398,7 +397,7 @@ class StagingUploadFiles(UploadFiles):
     def add_rawfiles(
             self, path: str, move: bool = False, prefix: str = None,
             force_archive: bool = False, target_dir: DirectoryObject = None) -> None:
-        """
+        '''
         Add rawfiles to the upload. The given file will be copied, moved, or extracted.
 
         Arguments:
@@ -408,7 +407,7 @@ class StagingUploadFiles(UploadFiles):
             force_archive: Expect the file to be a zip or other supported archive file.
                 Usually such files are only extracted if they can be extracted, and copied otherwise.
             target_dir: Override the directory to extract to. Default is the raw directory of this upload.
-        """
+        '''
         assert not self.is_frozen
         assert os.path.exists(path)
         self._size += os.stat(path).st_size
@@ -449,13 +448,13 @@ class StagingUploadFiles(UploadFiles):
 
     @property
     def is_frozen(self) -> bool:
-        """ Returns True if this upload is already *bagged*. """
+        ''' Returns True if this upload is already *bagged*. '''
         return self._frozen_file.exists()
 
     def pack(
-            self, upload: UploadWithMetadata, target_dir: DirectoryObject = None,
+            self, entries: Iterable[datamodel.EntryMetadata], target_dir: DirectoryObject = None,
             skip_raw: bool = False, skip_archive: bool = False) -> None:
-        """
+        '''
         Replaces the staging upload data with a public upload record by packing all
         data into files. It is only available if upload *is_bag*.
         This is potentially a long running operation.
@@ -466,7 +465,7 @@ class StagingUploadFiles(UploadFiles):
                 is the corresponding public upload files directory.
             skip_raw: determine to not pack the raw data, only archive and user metadata
             skip_archive: determine to not pack the archive data, only raw and user metadata
-        """
+        '''
         self.logger.info('started to pack upload')
 
         # freeze the upload
@@ -501,25 +500,25 @@ class StagingUploadFiles(UploadFiles):
         # zip archives
         if not skip_archive:
             with utils.timer(self.logger, 'packed zip json archive'):
-                self._pack_archive_files(upload, create_zipfile)
+                self._pack_archive_files(entries, create_zipfile)
             with utils.timer(self.logger, 'packed msgpack archive'):
-                self._pack_archive_files_msgpack(upload, write_msgfile)
+                self._pack_archive_files_msgpack(entries, write_msgfile)
 
         # zip raw files
         if not skip_raw:
             with utils.timer(self.logger, 'packed raw files'):
-                self._pack_raw_files(upload, create_zipfile)
+                self._pack_raw_files(entries, create_zipfile)
 
-    def _pack_archive_files_msgpack(self, upload: UploadWithMetadata, write_msgfile):
+    def _pack_archive_files_msgpack(self, entries: Iterable[datamodel.EntryMetadata], write_msgfile):
         restricted, public = 0, 0
-        for calc in upload.calcs:
+        for calc in entries:
             if calc.with_embargo:
                 restricted += 1
             else:
                 public += 1
 
         def create_iterator(with_embargo: bool):
-            for calc in upload.calcs:
+            for calc in entries:
                 if with_embargo == calc.with_embargo:
                     archive_file = self.archive_file_object(calc.calc_id)
                     if archive_file.exists():
@@ -535,12 +534,12 @@ class StagingUploadFiles(UploadFiles):
         except Exception as e:
             self.logger.error('exception during packing archives', exc_info=e)
 
-    def _pack_archive_files(self, upload: UploadWithMetadata, create_zipfile):
+    def _pack_archive_files(self, entries: Iterable[datamodel.EntryMetadata], create_zipfile):
         archive_public_zip = create_zipfile('archive', 'public', self._archive_ext)
         archive_restricted_zip = create_zipfile('archive', 'restricted', self._archive_ext)
 
         try:
-            for calc in upload.calcs:
+            for calc in entries:
                 archive_zip = archive_restricted_zip if calc.with_embargo else archive_public_zip
 
                 archive_filename = '%s.%s' % (calc.calc_id, self._archive_ext)
@@ -560,7 +559,7 @@ class StagingUploadFiles(UploadFiles):
             archive_restricted_zip.close()
             archive_public_zip.close()
 
-    def _pack_raw_files(self, upload: UploadWithMetadata, create_zipfile):
+    def _pack_raw_files(self, entries: Iterable[datamodel.EntryMetadata], create_zipfile):
         raw_public_zip = create_zipfile('raw', 'public', 'plain')
         raw_restricted_zip = create_zipfile('raw', 'restricted', 'plain')
 
@@ -568,7 +567,7 @@ class StagingUploadFiles(UploadFiles):
             # 1. add all public raw files
             # 1.1 collect all public mainfiles and aux files
             public_files: Dict[str, str] = {}
-            for calc in upload.calcs:
+            for calc in entries:
                 if not calc.with_embargo:
                     mainfile = calc.mainfile
                     assert mainfile is not None
@@ -578,7 +577,7 @@ class StagingUploadFiles(UploadFiles):
                             if not always_restricted(filepath):
                                 public_files[filepath] = None
             # 1.2 remove the non public mainfiles that have been added as auxfiles of public mainfiles
-            for calc in upload.calcs:
+            for calc in entries:
                 if calc.with_embargo:
                     mainfile = calc.mainfile
                     assert mainfile is not None
@@ -629,14 +628,14 @@ class StagingUploadFiles(UploadFiles):
         return results
 
     def calc_files(self, mainfile: str, with_mainfile: bool = True, with_cutoff: bool = True) -> Iterable[str]:
-        """
+        '''
     Returns all the auxfiles and the mainfile for a given mainfile. This implements
     nomad's logic about what is part of a calculation and what is not. The mainfile
     is the first entry, the rest is sorted.
         Arguments:
             mainfile: The mainfile relative to upload
             with_mainfile: Do include the mainfile, default is True
-        """
+        '''
         mainfile_object = self._raw_dir.join_file(mainfile)
         if not mainfile_object.exists():
             raise KeyError(mainfile)
@@ -666,7 +665,7 @@ class StagingUploadFiles(UploadFiles):
             return aux_files
 
     def calc_id(self, mainfile: str) -> str:
-        """
+        '''
     Calculates an id for the given calc.
         Arguments:
             mainfile: The mainfile path relative to the upload that identifies the calc in the folder structure.
@@ -674,11 +673,11 @@ class StagingUploadFiles(UploadFiles):
             The calc id
         Raises:
             KeyError: If the mainfile does not exist.
-        """
+        '''
         return utils.hash(self.upload_id, mainfile)
 
     def calc_hash(self, mainfile: str) -> str:
-        """
+        '''
         Calculates a hash for the given calc based on file contents and aux file contents.
         Arguments:
             mainfile: The mainfile path relative to the upload that identifies the calc in the folder structure.
@@ -686,7 +685,7 @@ class StagingUploadFiles(UploadFiles):
             The calculated hash
         Raises:
             KeyError: If the mainfile does not exist.
-        """
+        '''
         hash = hashlib.sha512()
         for filepath in self.calc_files(mainfile):
             with open(self._raw_dir.join_file(filepath).os_path, 'rb') as f:
@@ -702,12 +701,12 @@ class StagingUploadFiles(UploadFiles):
 
 
 class ArchiveBasedStagingUploadFiles(StagingUploadFiles):
-    """
+    '''
     :class:`StagingUploadFiles` based on a single uploaded archive file (.zip)
 
     Arguments:
         upload_path: The path to the uploaded file.
-    """
+    '''
 
     def __init__(
             self, upload_id: str, upload_path: str, *args, **kwargs) -> None:
@@ -736,12 +735,12 @@ class ArchiveBasedStagingUploadFiles(StagingUploadFiles):
 
 
 class PublicUploadFilesBasedStagingUploadFiles(StagingUploadFiles):
-    """
+    '''
     :class:`StagingUploadFiles` created from an existing :class:`PublicUploadFiles` instance.
 
     Arguments:
         public_upload_files: The :class:`PublicUploadFiles` to create the staging files from.
-    """
+    '''
 
     def __init__(
             self, public_upload_files: 'PublicUploadFiles', *args, **kwargs) -> None:
@@ -763,9 +762,9 @@ class PublicUploadFilesBasedStagingUploadFiles(StagingUploadFiles):
     def add_rawfiles(self, *args, **kwargs) -> None:
         assert False, 'do not add_rawfiles to a %s' % self.__class__.__name__
 
-    def pack(self, upload: UploadWithMetadata, *args, **kwargs) -> None:
-        """ Packs only the archive contents and stores it in the existing public upload files. """
-        super().pack(upload, target_dir=self.public_upload_files, skip_raw=True)
+    def pack(self, entries: Iterable[datamodel.EntryMetadata], *args, **kwargs) -> None:
+        ''' Packs only the archive contents and stores it in the existing public upload files. '''
+        super().pack(entries, target_dir=self.public_upload_files, skip_raw=True)
 
 
 class PublicUploadFiles(UploadFiles):
@@ -952,13 +951,13 @@ class PublicUploadFiles(UploadFiles):
         return self._file('archive', self._archive_ext, '%s.log' % calc_id, *args, **kwargs)
 
     def re_pack(
-            self, upload: UploadWithMetadata, skip_raw: bool = False,
+            self, entries: Iterable[datamodel.EntryMetadata], skip_raw: bool = False,
             skip_archive: bool = False) -> None:
-        """
+        '''
         Replaces the existing public/restricted data file pairs with new ones, based
         on current restricted information in the metadata. Should be used after updating
         the restrictions on calculations. This is potentially a long running operation.
-        """
+        '''
         # compute a list of files to repack
         files = []
         kinds = []
@@ -991,10 +990,10 @@ class PublicUploadFiles(UploadFiles):
         # perform the repacking
         try:
             if not skip_archive:
-                staging_upload._pack_archive_files(upload, create_zipfile)
-                staging_upload._pack_archive_files_msgpack(upload, write_msgfile)
+                staging_upload._pack_archive_files(entries, create_zipfile)
+                staging_upload._pack_archive_files_msgpack(entries, write_msgfile)
             if not skip_raw:
-                staging_upload._pack_raw_files(upload, create_zipfile)
+                staging_upload._pack_raw_files(entries, create_zipfile)
         finally:
             staging_upload.delete()
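The refactored `pack`/`re_pack` API above replaces the removed `UploadWithMetadata` argument with a plain iterable of `datamodel.EntryMetadata`. A minimal sketch of the new calling convention; the upload id, mainfile, and metadata values are hypothetical:

```python
from nomad import datamodel
from nomad.files import StagingUploadFiles

staging_files = StagingUploadFiles('some_upload_id')  # hypothetical upload
mainfile = 'vasp/OUTCAR'                              # hypothetical mainfile

# only the fields that packing actually reads are shown here
entry = datamodel.EntryMetadata(
    calc_id=staging_files.calc_id(mainfile),
    mainfile=mainfile,
    with_embargo=False)

# pack() and re_pack() now take an iterable of EntryMetadata
staging_files.pack([entry])
```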
 
diff --git a/nomad/infrastructure.py b/nomad/infrastructure.py
index 84da75147b..11aa8d2d6f 100644
--- a/nomad/infrastructure.py
+++ b/nomad/infrastructure.py
@@ -12,12 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 This module provides functions to establish connections to the database, search engine,
 and other infrastructure services. Usually everything is set up at once with :func:`setup`.
 This is run once for each *api* and *worker* process. Individual functions for partial
 setups exist to facilitate testing, :py:mod:`nomad.migration`, aspects of :py:mod:`nomad.cli`, etc.
-"""
+'''
 
 import os.path
 import shutil
@@ -42,19 +42,19 @@ from nomad import config, utils
 logger = None
 
 elastic_client = None
-""" The elastic search client. """
+''' The elastic search client. '''
 
 mongo_client = None
-""" The pymongo mongodb client. """
+''' The pymongo mongodb client. '''
 
 
 def setup():
-    """
+    '''
     Uses the current configuration (nomad/config.py and environment) to set up all the
     infrastructure services (repository db, mongo, elastic search) and logging.
     Will create client instances for the databases and has to be called before they
     can be used.
-    """
+    '''
     setup_logging()
     setup_mongo()
     setup_elastic()
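As a usage note, `setup` wires everything up at once, after which the module-level clients can be used; the partial setup functions remain available on their own, e.g. for tests. A short sketch:

```python
from nomad import infrastructure

infrastructure.setup()   # logging, mongodb, and elasticsearch in one call
# the module-level clients are initialized afterwards
print(infrastructure.elastic_client.info())

# or a partial setup for code that only needs mongodb
infrastructure.setup_mongo()
```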
@@ -75,7 +75,7 @@ def setup_logging():
 
 
 def setup_mongo():
-    """ Creates connection to mongodb. """
+    ''' Creates connection to mongodb. '''
     global mongo_client
     try:
         mongo_client = connect(db=config.mongo.db_name, host=config.mongo.host, port=config.mongo.port)
@@ -88,7 +88,7 @@ def setup_mongo():
 
 
 def setup_elastic():
-    """ Creates connection to elastic search. """
+    ''' Creates connection to elastic search. '''
     global elastic_client
     elastic_client = connections.create_connection(
         hosts=['%s:%d' % (config.elastic.host, config.elastic.port)],
@@ -111,10 +111,10 @@ def setup_elastic():
 
 
 class Keycloak():
-    """
+    '''
     A class that encapsulates all keycloak-related functions for easier mocking and
     configuration.
-    """
+    '''
     def __init__(self):
         self.__oidc_client = None
         self.__admin_client = None
@@ -148,7 +148,7 @@ class Keycloak():
         return self.__public_keys
 
     def authorize_flask(self, basic: bool = True) -> str:
-        """
+        '''
         Authorizes the current flask request with keycloak. Uses either Bearer or Basic
         authentication, depending on available headers in the request. Bearer auth is
         basically offline (besides retrieving and caching keycloak's public key for signature
@@ -157,7 +157,7 @@ class Keycloak():
         Will set ``g.user``, either with None or user data from the respective OIDC token.
 
         Returns: An error message or None
-        """
+        '''
         g.oidc_access_token = None
         if 'Authorization' in request.headers and request.headers['Authorization'].startswith('Bearer '):
             g.oidc_access_token = request.headers['Authorization'].split(None, 1)[1].strip()
@@ -235,10 +235,10 @@ class Keycloak():
             pass
 
     def add_user(self, user, bcrypt_password=None, invite=False):
-        """
+        '''
         Adds the given :class:`nomad.datamodel.User` instance to the configured keycloak
         realm using the keycloak admin API.
-        """
+        '''
         from nomad import datamodel
         if not isinstance(user, datamodel.User):
             if 'user_id' not in user:
@@ -337,12 +337,12 @@ class Keycloak():
             for keycloak_user in keycloak_results]
 
     def get_user(self, user_id: str = None, username: str = None, user=None) -> object:
-        """
+        '''
         Retrieves all available information about a user from the keycloak admin
         interface. This must be used to retrieve complete user information, because
         the info solely gathered from tokens (i.e. for the authenticated user ``g.user``)
         is generally incomplete.
-        """
+        '''
 
         if user is not None and user_id is None:
             user_id = user.user_id
@@ -390,7 +390,7 @@ keycloak = Keycloak()
 
 
 def reset(remove: bool):
-    """
+    '''
     Resets the databases mongo, elastic/calcs, and all files. Be careful.
     In contrast to :func:`remove`, it will only remove the contents of dbs and indices.
     This function just attempts to remove everything; there is no exception handling
@@ -398,7 +398,7 @@ def reset(remove: bool):
 
     Args:
         remove: Do not try to recreate empty databases, remove entirely.
-    """
+    '''
     try:
         if not mongo_client:
             setup_mongo()
diff --git a/nomad/metainfo/CONCEPT.md b/nomad/metainfo/CONCEPT.md
index 9d1fb4324d..f99214f37c 100644
--- a/nomad/metainfo/CONCEPT.md
+++ b/nomad/metainfo/CONCEPT.md
@@ -179,9 +179,9 @@ Arbitrary serializable objects that can contain additional information.
 This could be code, from a python module that represents the NOMAD *common* package `nomad.metainfo.common`:
 ```python
 class System(MSection):
-    """
+    '''
     The system is ...
-    """
+    '''
 
     n_atoms = Quantity(type=int, derived_from='atom_labels')
 
@@ -189,9 +189,9 @@ class System(MSection):
         shape=['n_atoms'],
         type=MEnum(ase.data.chemical_symbols),
         annotations=[ElasticSearchQuantity('keyword')])
-    """
+    '''
     Atom labels are ...
-    """
+    '''
 
     formula_hill = Quantity(type=str, derived_from=['atom_labels'])
 
diff --git a/nomad/metainfo/__init__.py b/nomad/metainfo/__init__.py
index 9a54c3c36a..0521e9d1cb 100644
--- a/nomad/metainfo/__init__.py
+++ b/nomad/metainfo/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 The NOMAD meta-info allows defining schemas for physics data independent of the used
 storage format. It allows defining physics quantities with types, complex shapes
 (vectors, matrices, etc.), units, links, and descriptions. It allows organizing large
@@ -32,15 +32,15 @@ Starting example
     from nomad.metainfo import MSection, Quantity, SubSection, Units
 
     class System(MSection):
-        \"\"\"
+        \'\'\'
         A system section includes all quantities that describe a single simulated
         system (a.k.a. geometry).
-        \"\"\"
+        \'\'\'
 
         n_atoms = Quantity(
-            type=int, description='''
+            type=int, description=\'\'\'
             Defines the number of atoms in the system.
-            ''')
+            \'\'\')
 
         atom_labels = Quantity(type=MEnum(ase.data.chemical_symbols), shape=['n_atoms'])
         atom_positions = Quantity(type=float, shape=['n_atoms', 3], unit=Units.m)
@@ -146,7 +146,7 @@ A `section class` looks like this:
 .. code-block:: python
 
     class SectionName(BaseSection):
-        ''' Section description '''
+        \'\'\' Section description \'\'\'
         m_def = Section(**section_attributes)
 
         quantity_name = Quantity(**quantity_attributes)
@@ -186,7 +186,7 @@ category looks like this:
 .. code-block:: python
 
     class CategoryName(MCategory):
-        ''' Category description '''
+        \'\'\' Category description \'\'\'
         m_def = Category(links=['http://further.explanation.eu'], categories=[ParentCategory])
 
 Packages
@@ -272,7 +272,7 @@ A more complex example
 .. literalinclude:: ../nomad/metainfo/example.py
     :language: python
 
-"""
+'''
 
 from .metainfo import MSection, MCategory, Definition, Property, Quantity, SubSection, \
     Section, Category, Package, Environment, MEnum, Datetime, MProxy, MetainfoError, DeriveError, \
diff --git a/nomad/metainfo/elastic.py b/nomad/metainfo/elastic.py
index 352b74b7ae..c386703864 100644
--- a/nomad/metainfo/elastic.py
+++ b/nomad/metainfo/elastic.py
@@ -12,15 +12,15 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 Adds elastic search support to the metainfo.
-"""
+'''
 
 from . import Section, MSection
 
 
 def elastic_mapping(section: Section, base_cls: type) -> type:
-    """ Creates an elasticsearch_dsl document class from a section definition. """
+    ''' Creates an elasticsearch_dsl document class from a section definition. '''
 
     dct = {
         name: quantity.m_annotations['elastic']['type']()
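A hedged sketch of how `elastic_mapping` might be used: it expects annotated quantities to provide an elasticsearch_dsl field type under `m_annotations['elastic']['type']`. The `Material` section and the way the annotation is attached here are illustrative assumptions:

```python
from elasticsearch_dsl import Document, Keyword
from nomad.metainfo import MSection, Quantity
from nomad.metainfo.elastic import elastic_mapping

class Material(MSection):  # hypothetical section
    formula = Quantity(type=str)

# assumption: annotations are plain dicts on the quantity definition
Material.formula.m_annotations['elastic'] = dict(type=Keyword)

# builds an elasticsearch_dsl document class with a Keyword field 'formula'
MaterialDocument = elastic_mapping(Material.m_def, Document)
```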
diff --git a/nomad/metainfo/example.py b/nomad/metainfo/example.py
index 3be207f567..3f04abd9cb 100644
--- a/nomad/metainfo/example.py
+++ b/nomad/metainfo/example.py
@@ -1,4 +1,4 @@
-""" An example metainfo package. """
+''' An example metainfo package. '''
 
 import numpy as np
 from datetime import datetime
@@ -9,28 +9,28 @@ m_package = Package(links=['http://metainfo.nomad-coe.eu'])
 
 
 class SystemHash(MCategory):
-    """ All quantities that contribute to what makes a system unique. """
+    ''' All quantities that contribute to what makes a system unique. '''
 
 
 class Parsing(MSection):
-    """ All data that describes the NOMAD parsing of this run.
+    ''' All data that describes the NOMAD parsing of this run.
 
     Quantities can also be documented like this:
 
     Args:
         parser_name: 'Name of the used parser'
         parser_version: 'Version of the used parser'
-    """
+    '''
 
     parser_name = Quantity(type=str)
     parser_version = Quantity(type=str)
-    nomad_version = Quantity(type=str)
+    nomad_version = Quantity(type=str, default='latest')
     warnings = Quantity(type=str, shape=['0..*'])
     parse_time = Quantity(type=Datetime)
 
 
 class System(MSection):
-    """ All data that describes a simulated system. """
+    ''' All data that describes a simulated system. '''
 
     n_atoms = Quantity(
         type=int, derived=lambda system: len(system.atom_labels),
@@ -63,7 +63,7 @@ class SCC(MSection):
 
 
 class Run(MSection):
-    """ All data that belongs to a single code run. """
+    ''' All data that belongs to a single code run. '''
 
     code_name = Quantity(type=str, description='The name of the code that was run.')
     code_version = Quantity(type=str, description='The version of the code that was run.')
@@ -78,7 +78,7 @@ class Run(MSection):
 
 
 class VaspRun(Run):
-    """ All VASP specific quantities for section Run. """
+    ''' All VASP specific quantities for section Run. '''
     m_def = Section(extends_base_section=True)
 
     x_vasp_raw_format = Quantity(
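The new `default='latest'` on `Parsing.nomad_version` illustrates quantity defaults: an unset quantity yields its default on access without counting as set. A small sketch of the expected behavior:

```python
from nomad.metainfo.example import Parsing

parsing = Parsing()
# the quantity was never set, so the declared default is returned
assert parsing.nomad_version == 'latest'
assert not parsing.m_is_set(Parsing.nomad_version)
```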
diff --git a/nomad/metainfo/flask_restplus.py b/nomad/metainfo/flask_restplus.py
index 5ee7fa53de..a7621e5089 100644
--- a/nomad/metainfo/flask_restplus.py
+++ b/nomad/metainfo/flask_restplus.py
@@ -6,7 +6,7 @@ from .metainfo import Section, Quantity, Datetime
 
 
 def field(quantity: Quantity):
-    """ Returns a flask restplus field with quantity type and shape. """
+    ''' Returns a flask restplus field with quantity type and shape. '''
     field = None
     if quantity.type == int:
         field = fields.Integer
diff --git a/nomad/metainfo/legacy.py b/nomad/metainfo/legacy.py
index 3b0a3c4f84..e66b5cf4c5 100644
--- a/nomad/metainfo/legacy.py
+++ b/nomad/metainfo/legacy.py
@@ -50,7 +50,7 @@ def from_legacy_metainfo(meta_info_env, package_names: List[str] = None) \
 
 
 class LegacyMetainfoEnvironment:
-    """
+    '''
     Args:
         env: The metainfo environment that is used to manage the definitions.
         orig_legacy_env: The old metainfo :class:`InfoKindEnv` environment with the
@@ -59,7 +59,7 @@ class LegacyMetainfoEnvironment:
             converted metainfo environment.
         all_legacy_defs: A dict that stores the original :class:`InfoKindEl`s by name.
         all_defs: A dict that stores the converted section and category definitions.
-    """
+    '''
     def __init__(self, metainfo: Union[InfoKindEnv, str], package_names: List[str] = None, logger=None):
         self.logger = utils.get_logger(__name__) if logger is None else logger
         self.env = Environment()
@@ -109,9 +109,9 @@ class LegacyMetainfoEnvironment:
 
     def convert_package(
             self, legacy_definitions: List[InfoKindEl], **kwargs) -> Package:
-        """ Converts a single legacy metainfo package, i.e. a list of :class:`InfoKindEl`
+        ''' Converts a single legacy metainfo package, i.e. a list of :class:`InfoKindEl`
         into a metainfo package.
-        """
+        '''
         package = Package(**kwargs)
 
         definition: Definition = None
@@ -212,7 +212,7 @@ class LegacyMetainfoEnvironment:
         return package
 
     def legacy_info(self, definition: Definition, *args, **kwargs) -> InfoKindEl:
-        """ Creates a legacy metainfo objects for the given definition. """
+        ''' Creates a legacy metainfo object for the given definition. '''
         super_names: List[str] = list()
         result: Dict[str, Any] = dict(
             name=definition.name,
@@ -266,7 +266,7 @@ class LegacyMetainfoEnvironment:
         return InfoKindEl(*args, **result, **kwargs)
 
     def legacy_info_env(self, packages: List[Package] = None, *args, **kwargs) -> InfoKindEnv:
-        """ Creates a legacy metainfo environment with all definitions from the given packages. """
+        ''' Creates a legacy metainfo environment with all definitions from the given packages. '''
         if packages is None:
             packages = self.env.packages
 
@@ -331,7 +331,7 @@ class LegacyMetainfoEnvironment:
 
 
 if __name__ == '__main__':
-    """ Converts the old metainfo and code-generates definitions for the new metainfo """
+    ''' Converts the old metainfo and code-generates definitions for the new metainfo '''
     env = LegacyMetainfoEnvironment(
         metainfo='vasp.nomadmetainfo.json',
         package_names=['%s.nomadmetainfo.json' % pkg for pkg in ['common', 'public', 'vasp']])
diff --git a/nomad/metainfo/metainfo.py b/nomad/metainfo/metainfo.py
index 9c96ca4b20..4febbc2e5a 100644
--- a/nomad/metainfo/metainfo.py
+++ b/nomad/metainfo/metainfo.py
@@ -29,6 +29,7 @@ import aniso8601
 from datetime import datetime
 import pytz
 import docstring_parser
+import flask_restplus.inputs
 
 
 m_package: 'Package' = None
@@ -41,24 +42,24 @@ T = TypeVar('T')
 # Metainfo errors
 
 class MetainfoError(Exception):
-    """ Metainfo related errors. """
+    ''' Metainfo related errors. '''
     pass
 
 
 class DeriveError(MetainfoError):
-    """ An error occurred while computing a derived value. """
+    ''' An error occurred while computing a derived value. '''
     pass
 
 
 class MetainfoReferenceError(MetainfoError):
-    """ An error indicating that a reference could not be resolved. """
+    ''' An error indicating that a reference could not be resolved. '''
     pass
 
 
 # Metainfo quantity data types
 
 class MEnum():
-    """Allows to define str types with values limited to a pre-set list of possible values."""
+    '''Allows defining str types with values limited to a pre-set list of possible values.'''
     def __init__(self, *args, **kwargs):
         # Supports one big list in place of args
         if len(args) == 1 and isinstance(args[0], list):
@@ -80,18 +81,18 @@ class MEnum():
 
 
 class MProxy():
-    """ A placeholder object that acts as reference to a value that is not yet resolved.
+    ''' A placeholder object that acts as a reference to a value that is not yet resolved.
 
     Attributes:
         url: The reference represented as a URL string.
-    """
+    '''
 
     def __init__(self, url: str):
         self.url = url
 
 
 class DataType:
-    """
+    '''
     Allows the definition of custom data types that can be used in the meta-info.
 
     The metainfo supports most types out of the box. This includes the python built-in
@@ -102,21 +103,21 @@ class DataType:
     type checks and various value transformations. This allows values to be stored in the
     section differently from how the user might set/get them, and it allows
     non-serializable values that are transformed on de-/serialization.
-    """
+    '''
     def set_normalize(self, section: 'MSection', quantity_def: 'Quantity', value: Any) -> Any:
-        """ Transforms the given value before it is set and checks its type. """
+        ''' Transforms the given value before it is set and checks its type. '''
         return value
 
     def get_normalize(self, section: 'MSection', quantity_def: 'Quantity', value: Any) -> Any:
-        """ Transforms the given value when it is get. """
+        ''' Transforms the given value when it is retrieved. '''
         return value
 
     def serialize(self, section: 'MSection', quantity_def: 'Quantity', value: Any) -> Any:
-        """ Transforms the given value when making the section serializeable. """
+        ''' Transforms the given value when making the section serializable. '''
         return value
 
     def deserialize(self, section: 'MSection', quantity_def: 'Quantity', value: Any) -> Any:
-        """ Transforms the given value from its serializeable form. """
+        ''' Transforms the given value from its serializable form. '''
         return value
 
 
@@ -175,7 +176,7 @@ class _Unit(DataType):
 
 
 units = pint.UnitRegistry()
-""" The default pint unit registry that should be used to give units to quantity definitions. """
+''' The default pint unit registry that should be used to give units to quantity definitions. '''
 
 
 class _Callable(DataType):
@@ -187,7 +188,7 @@ class _Callable(DataType):
 
 
 class _QuantityType(DataType):
-    """ Data type for defining the type of a metainfo quantity.
+    ''' Data type for defining the type of a metainfo quantity.
 
     A metainfo quantity type can be one of
 
@@ -197,7 +198,7 @@ class _QuantityType(DataType):
     - an MEnum instance to use its values as possible str values
     - a custom datatype, i.e. instance of :class:`DataType`
     - Any
-    """
+    '''
 
     def set_normalize(self, section, quantity_def, value):
         if value in [str, int, float, bool]:
@@ -261,7 +262,7 @@ class _QuantityType(DataType):
 
 
 class Reference(DataType):
-    """ Datatype used for reference quantities. """
+    ''' Datatype used for reference quantities. '''
 
     def __init__(self, section_def: 'Section'):
         if not isinstance(section_def, Section):
@@ -292,12 +293,15 @@ class Reference(DataType):
 
         return value
 
+    def resolve(self, section: 'MSection', quantity_def: 'Quantity', value: Any) -> 'MSection':
+        return section.m_resolve(value.url)
+
     def get_normalize(self, section: 'MSection', quantity_def: 'Quantity', value: Any) -> Any:
         if isinstance(value, MProxy):
-            resolved: 'MSection' = section.m_resolve(value.url)
+            resolved: 'MSection' = self.resolve(section, quantity_def, value)
             if resolved is None:
                 raise ReferenceError('Could not resolve %s from %s.' % (value, section))
-            section.m_set(quantity_def, value)
+
             return resolved
 
         return value
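The new `resolve` hook factors the actual proxy resolution out of `get_normalize`, so subclasses can customize how `MProxy` urls are resolved. A hypothetical sketch:

```python
# hypothetical Reference subclass that resolves proxy urls against the root
# of the section tree instead of the referencing section itself
class RootReference(Reference):

    def resolve(self, section, quantity_def, value):
        return section.m_root().m_resolve(value.url)
```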
@@ -311,31 +315,46 @@ class Reference(DataType):
 
 class _Datetime(DataType):
 
-    def __parse(self, datetime_str: str) -> datetime:
+    def _parse(self, datetime_str: str) -> datetime:
         try:
-            try:
-                return aniso8601.parse_datetime(datetime_str)
-            except ValueError:
-                date = aniso8601.parse_date(datetime_str)
-                return datetime(date.year, date.month, date.day)
-        except Exception:
-            raise TypeError('Invalid date literal "{0}"'.format(datetime_str))
+            return aniso8601.parse_datetime(datetime_str)
+        except ValueError:
+            pass
 
-    def set_normalize(self, section: 'MSection', quantity_def: 'Quantity', value: Any) -> Any:
+        try:
+            # parse_date returns a date; convert it to a datetime as the old code did
+            date = aniso8601.parse_date(datetime_str)
+            return datetime(date.year, date.month, date.day)
+        except ValueError:
+            pass
+
+        try:
+            # TODO necessary?
+            return flask_restplus.inputs.datetime_from_rfc822(datetime_str)
+        except ValueError:
+            pass
+
+        raise TypeError('Invalid date literal "{0}"'.format(datetime_str))
+
+    def _convert(self, value):
         if isinstance(value, str):
-            value = self.__parse(value)
+            value = self._parse(value)
+
+        elif isinstance(value, (int, float)):
+            value = datetime.fromtimestamp(value)
 
         if not isinstance(value, datetime):
             raise TypeError('%s is not a datetime.' % value)
 
         return value
 
+    def set_normalize(self, section: 'MSection', quantity_def: 'Quantity', value: Any) -> Any:
+        return self._convert(value)
+
     def serialize(self, section: 'MSection', quantity_def: 'Quantity', value: Any) -> Any:
         value = value.replace(tzinfo=pytz.utc)
         return value.isoformat()
 
     def deserialize(self, section: 'MSection', quantity_def: 'Quantity', value: Any) -> Any:
-        return self.__parse(value)
+        return self._convert(value)
 
 
 Dimension = _Dimension()
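After this rework, datetime quantities accept several input forms. A sketch of the accepted values; the `Event` section is hypothetical:

```python
from nomad.metainfo import Datetime, MSection, Quantity

class Event(MSection):  # hypothetical section
    occurred = Quantity(type=Datetime)

event = Event()
event.occurred = '2020-03-01T20:43:01+01:00'        # ISO 8601 datetime
event.occurred = '2020-03-01'                       # ISO 8601 date
event.occurred = 'Sun, 01 Mar 2020 19:43:01 -0000'  # RFC 822 via flask_restplus
event.occurred = 1583091781                         # int/float POSIX timestamp
```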
@@ -365,7 +384,7 @@ class MObjectMeta(type):
 
 
 SectionDef = Union[str, 'Section', 'SubSection', Type[MSectionBound]]
-""" Type for section definition references.
+''' Type for section definition references.
 
 This can either be:
 
@@ -373,11 +392,11 @@ This can either be :
 - the section definition itself
 - the definition of a sub section
 - or the section definition Python class
-"""
+'''
 
 
 class MData:
-    """ An interface for low-level metainfo data objects.
+    ''' An interface for low-level metainfo data objects.
 
     Metainfo data objects store the data of a single section instance. This interface
     constitutes the minimal functionality for accessing and modifying section data.
@@ -386,7 +405,7 @@ class MData:
     All section instances will implement this interface, usually by delegating calls to
     a standalone implementation of this interface. This allows various data backends
     to be configured on section instance creation.
-    """
+    '''
 
     def __getitem__(self, key):
         raise NotImplementedError()
@@ -395,47 +414,52 @@ class MData:
         raise NotImplementedError()
 
     def m_set(self, section: 'MSection', quantity_def: 'Quantity', value: Any) -> None:
-        """ Set the given value for the given quantity. """
+        ''' Set the given value for the given quantity. '''
         raise NotImplementedError()
 
     def m_get(self, section: 'MSection', quantity_def: 'Quantity') -> Any:
-        """ Retrieve the given value for the given quantity. """
+        ''' Retrieve the given value for the given quantity. '''
         raise NotImplementedError()
 
     def m_is_set(self, section: 'MSection', quantity_def: 'Quantity') -> bool:
-        """ True iff this quantity was explicitely set. """
+        ''' True iff this quantity was explicitly set. '''
         raise NotImplementedError()
 
     def m_add_values(
             self, section: 'MSection', quantity_def: 'Quantity', values: Any,
             offset: int) -> None:
-        """ Add (partial) values for the given quantity of higher dimensionality. """
+        ''' Add (partial) values for the given quantity of higher dimensionality. '''
         raise NotImplementedError()
 
     def m_add_sub_section(
             self, section: 'MSection', sub_section_def: 'SubSection',
             sub_section: 'MSection') -> None:
-        """ Adds the given section instance as a sub section of the given sub section definition. """
+        ''' Adds the given section instance as a sub section of the given sub section definition. '''
+        raise NotImplementedError()
+
+    def m_remove_sub_section(
+            self, section: 'MSection', sub_section_def: 'SubSection', index: int) -> None:
+        ''' Removes the sub section at the given index for the given sub section definition. '''
         raise NotImplementedError()
 
     def m_get_sub_section(
             self, section: 'MSection', sub_section_def: 'SubSection',
             index: int) -> 'MSection':
-        """ Retrieves a single sub section of the given sub section definition. """
+        ''' Retrieves a single sub section of the given sub section definition. '''
         raise NotImplementedError()
 
     def m_get_sub_sections(
             self, section: 'MSection', sub_section_def: 'SubSection') -> Iterable['MSection']:
-        """ Retrieves  all sub sections of the given sub section definition. """
+        ''' Retrieves all sub sections of the given sub section definition. '''
         raise NotImplementedError()
 
     def m_sub_section_count(self, section: 'MSection', sub_section_def: 'SubSection') -> int:
-        """ Returns the number of sub sections for the given sub section definition. """
+        ''' Returns the number of sub sections for the given sub section definition. '''
         raise NotImplementedError()
 
 
 class MDataDict(MData):
-    """ A simple dict backed implementaton of :class:`MData`. It is used by default. """
+    ''' A simple dict-backed implementation of :class:`MData`. It is used by default. '''
 
     def __init__(self, dct: Dict[str, Any] = None):
         if dct is None:
@@ -484,6 +508,15 @@ class MDataDict(MData):
         else:
             self.dct[sub_section_name] = sub_section
 
+    def m_remove_sub_section(
+            self, section: 'MSection', sub_section_def: 'SubSection', index: int) -> None:
+
+        if sub_section_def.repeats:
+            del(self.dct[sub_section_def.name][index])
+
+        elif sub_section_def.name in self.dct:
+            del(self.dct[sub_section_def.name])
+
     def m_get_sub_section(
             self, section: 'MSection', sub_section_def: 'SubSection',
             index: int) -> 'MSection':
@@ -510,17 +543,17 @@ class MDataDict(MData):
 
 
 class MResource():
-    """Represents a collection of related metainfo data, i.e. a set of :class:`MSection` instances.
+    '''Represents a collection of related metainfo data, i.e. a set of :class:`MSection` instances.
 
     MResource allows related objects to be kept together and to resolve sections of
     certain section definitions.
-    """
+    '''
     def __init__(self):
         self.__data: Dict['Section', List['MSection']] = dict()
         self.contents: List['MSection'] = []
 
     def create(self, section_cls: Type[MSectionBound], *args, **kwargs) -> MSectionBound:
-        """ Create an instance of the given section class and adds it to this resource. """
+        ''' Creates an instance of the given section class and adds it to this resource. '''
         result = section_cls(*args, **kwargs)
         self.add(result)
         return cast(MSectionBound, result)
@@ -539,11 +572,11 @@ class MResource():
             self.contents.remove(section)
 
     def all(self, section_cls: Type[MSectionBound]) -> List[MSectionBound]:
-        """ Returns all instances of the given section class in this resource. """
+        ''' Returns all instances of the given section class in this resource. '''
         return cast(List[MSectionBound], self.__data.get(section_cls.m_def, []))
 
     def unload(self):
-        """ Breaks all references among the contain metainfo sections to allow GC. """
+        ''' Breaks all references among the contained metainfo sections to allow GC. '''
         for collections in self.__data.values():
             for section in collections:
                 section.m_parent = None
@@ -552,8 +585,8 @@ class MResource():
         # TODO break actual references via quantities
 
 
-class MSection(metaclass=MObjectMeta):
-    """Base class for all section instances on all meta-info levels.
+class MSection(metaclass=MObjectMeta):  # TODO find a way to make this a subclass of collections.abc.Mapping
+    '''Base class for all section instances on all meta-info levels.
 
     All `section instances` indirectly instantiate the :class:`MSection` and therefore all
     members of :class:`MSection` are available on all `section instances`. :class:`MSection`
@@ -585,7 +618,7 @@ class MSection(metaclass=MObjectMeta):
 
         m_resource: The :class:`MResource` that contains and manages this section.
 
-    """
+    '''
 
     m_def: 'Section' = None
 
@@ -689,7 +722,7 @@ class MSection(metaclass=MObjectMeta):
         constraints: Set[str] = set()
         event_handlers: Set[Callable] = set(m_def.event_handlers)
         for name, attr in cls.__dict__.items():
-            # transfer names and descriptions for properties
+            # transfer names and descriptions for properties, init properties
             if isinstance(attr, Property):
                 attr.name = name
                 if attr.description is not None:
@@ -703,6 +736,8 @@ class MSection(metaclass=MObjectMeta):
                 else:
                     raise NotImplementedError('Unknown property kind.')
 
+                attr.__init_property__()
+
             if inspect.isfunction(attr):
                 method_name = attr.__name__
 
@@ -856,7 +891,7 @@ class MSection(metaclass=MObjectMeta):
         return self.__check_np(quantity_def, value)
 
     def m_set(self, quantity_def: 'Quantity', value: Any) -> None:
-        """ Set the given value for the given quantity. """
+        ''' Set the given value for the given quantity. '''
         quantity_def = self.__resolve_synonym(quantity_def)
 
         if quantity_def.derived is not None:
@@ -890,7 +925,7 @@ class MSection(metaclass=MObjectMeta):
                 handler(self, quantity_def, value)
 
     def m_get(self, quantity_def: 'Quantity') -> Any:
-        """ Retrieve the given value for the given quantity. """
+        ''' Retrieve the given value for the given quantity. '''
         quantity_def = self.__resolve_synonym(quantity_def)
         if quantity_def.derived is not None:
             try:
@@ -918,6 +953,10 @@ class MSection(metaclass=MObjectMeta):
                     'Only numpy arrays and dtypes can be used for higher dimensional '
                     'quantities.')
 
+            if isinstance(quantity_def.type, Reference):
+                # save the resolved value for future accesses, to avoid re-resolving it
+                self.m_data.m_set(self, quantity_def, value)
+
         elif type(quantity_def.type) == np.dtype:
             if quantity_def.unit is not None:
                 value = value * quantity_def.unit
@@ -925,7 +964,7 @@ class MSection(metaclass=MObjectMeta):
         return value
 
     def m_is_set(self, quantity_def: 'Quantity') -> bool:
-        """ True if the given quantity is set. """
+        ''' True if the given quantity is set. '''
         quantity_def = self.__resolve_synonym(quantity_def)
         if quantity_def.derived is not None:
             return True
@@ -933,15 +972,25 @@ class MSection(metaclass=MObjectMeta):
         return self.m_data.m_is_set(self, quantity_def)
 
     def m_add_values(self, quantity_def: 'Quantity', values: Any, offset: int) -> None:
-        """ Add (partial) values for the given quantity of higher dimensionality. """
+        ''' Add (partial) values for the given quantity of higher dimensionality. '''
         self.m_data.m_add_values(self, quantity_def, values, offset)
 
     def m_add_sub_section(self, sub_section_def: 'SubSection', sub_section: 'MSection') -> None:
-        """ Adds the given section instance as a sub section of the given sub section definition. """
+        ''' Adds the given section instance as a sub section of the given sub section definition. '''
 
         parent_index = -1
         if sub_section_def.repeats:
             parent_index = self.m_sub_section_count(sub_section_def)
+
+        else:
+            old_sub_section = self.m_data.m_get_sub_section(self, sub_section_def, -1)
+            if old_sub_section is not None:
+                old_sub_section.m_parent = None
+                old_sub_section.m_parent_sub_section = None
+                old_sub_section.m_parent_index = -1
+                if self.m_resource is not None:
+                    self.m_resource.remove(old_sub_section)
+
         sub_section.m_parent = self
         sub_section.m_parent_sub_section = sub_section_def
         sub_section.m_parent_index = parent_index
@@ -956,29 +1005,33 @@ class MSection(metaclass=MObjectMeta):
             if handler.__name__.startswith('on_add_sub_section'):
                 handler(self, sub_section_def, sub_section)
 
+    def m_remove_sub_section(self, sub_section_def: 'SubSection', index: int) -> None:
+        ''' Removes the existing section for a non-repeatable sub section. '''
+        self.m_data.m_remove_sub_section(self, sub_section_def, index)
+
     def m_get_sub_section(self, sub_section_def: 'SubSection', index: int) -> 'MSection':
-        """ Retrieves a single sub section of the given sub section definition. """
+        ''' Retrieves a single sub section of the given sub section definition. '''
         return self.m_data.m_get_sub_section(self, sub_section_def, index)
 
     def m_get_sub_sections(self, sub_section_def: 'SubSection') -> Iterable['MSection']:
-        """ Retrieves  all sub sections of the given sub section definition. """
+        ''' Retrieves  all sub sections of the given sub section definition. '''
         return self.m_data.m_get_sub_sections(self, sub_section_def)
 
     def m_sub_section_count(self, sub_section_def: 'SubSection') -> int:
-        """ Returns the number of sub sections for the given sub section definition. """
+        ''' Returns the number of sub sections for the given sub section definition. '''
         return self.m_data.m_sub_section_count(self, sub_section_def)
 
     def m_create(
             self, section_cls: Type[MSectionBound], sub_section_def: 'SubSection' = None,
             **kwargs) -> MSectionBound:
-        """ Creates a section instance and adds it to this section provided there is a
+        ''' Creates a section instance and adds it to this section provided there is a
         corresponding sub section.
 
         Args:
             section_cls: The section class for the sub-section to create
             sub_section_def: If there are multiple sub-sections for the given class,
                 this must be used to explicitly state the sub-section definition.
-        """
+        '''
 
         section_def = section_cls.m_def
         sub_section_defs = self.m_def.all_sub_sections_by_section.get(section_def, [])
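With these changes, a non-repeating sub section behaves like a replaceable slot: adding a second instance detaches the previous one, and `m_remove_sub_section` removes one explicitly. A sketch using the example sections, assuming `Run` declares a non-repeating `parsing` sub section (hypothetical name):

```python
from nomad.metainfo.example import Parsing, Run

run = Run()
first = Parsing()
run.m_add_sub_section(Run.parsing, first)

second = Parsing()
run.m_add_sub_section(Run.parsing, second)  # detaches first, first.m_parent is None

run.m_remove_sub_section(Run.parsing, -1)   # removes second again
```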
@@ -1005,7 +1058,7 @@ class MSection(metaclass=MObjectMeta):
         return cast(MSectionBound, sub_section)
 
     def m_update(self, safe: bool = True, **kwargs):
-        """ Updates all quantities and sub-sections with the given arguments. """
+        ''' Updates all quantities and sub-sections with the given arguments. '''
         if safe:
             for name, value in kwargs.items():
                 prop = self.m_def.all_properties.get(name, None)
@@ -1029,15 +1082,22 @@ class MSection(metaclass=MObjectMeta):
             self.m_data.m_data.dct.update(**kwargs)  # type: ignore
 
     def m_as(self, section_cls: Type[MSectionBound]) -> MSectionBound:
-        """ 'Casts' this section to the given extending sections. """
+        ''' 'Casts' this section to the given extending section class. '''
         return cast(MSectionBound, self)
 
     def m_follows(self, definition: 'Section') -> bool:
-        """ Determines if this section's definition is or is derived from the given definition. """
+        ''' Determines if this section's definition is or is derived from the given definition. '''
         return self.m_def == definition or definition in self.m_def.all_base_sections
 
-    def m_to_dict(self, with_meta: bool = False) -> Dict[str, Any]:
-        """Returns the data of this section as a json serializeable dictionary. """
+    def m_to_dict(self, with_meta: bool = False, include_defaults: bool = False) -> Dict[str, Any]:
+        '''
+        Returns the data of this section as a json serializeable dictionary.
+
+        Arguments:
+            with_meta: Include information about the section definition and the section's
+                position in its parent.
+            include_defaults: Include default values of unset quantities.
+        '''
 
         def items() -> Iterable[Tuple[str, Any]]:
             # metadata
@@ -1050,81 +1110,100 @@ class MSection(metaclass=MObjectMeta):
 
             # quantities
             for name, quantity in self.m_def.all_quantities.items():
-                if quantity.virtual or not self.m_is_set(quantity):
+                if quantity.virtual:
                     continue
 
-                if self.m_is_set(quantity) and quantity.derived is None:
-                    serialize: TypingCallable[[Any], Any] = str
-                    if isinstance(quantity.type, DataType):
+                is_set = self.m_is_set(quantity)
+                if not is_set:
+                    if not include_defaults or not quantity.m_is_set(Quantity.default):
+                        continue
 
-                        def data_type_serialize(value):
-                            return quantity.type.serialize(self, quantity, value)
+                quantity_type = quantity.type
 
-                        serialize = data_type_serialize
+                serialize: TypingCallable[[Any], Any] = str
+                if isinstance(quantity_type, Reference):
 
-                    elif quantity.type in [str, int, float, bool]:
-                        serialize = quantity.type
+                    def reference_serialize(value):
+                        if isinstance(value, MProxy):
+                            return value.url
+                        else:
+                            return quantity_type.serialize(self, quantity, value)
 
-                    elif type(quantity.type) == np.dtype:
-                        pass
+                    serialize = reference_serialize
 
-                    elif isinstance(quantity.type, MEnum):
-                        pass
+                elif isinstance(quantity_type, DataType):
 
-                    elif quantity.type == Any:
-                        def _serialize(value: Any):
-                            if type(value) not in [str, int, float, bool, list, type(None)]:
-                                raise MetainfoError(
-                                    'Only python primitives are allowed for Any typed non '
-                                    'virtual quantities: %s of quantity %s in section %s' %
-                                    (value, quantity, self))
+                    def data_type_serialize(value):
+                        return quantity_type.serialize(self, quantity, value)
 
-                            return value
+                    serialize = data_type_serialize
 
-                        serialize = _serialize
+                elif quantity_type in [str, int, float, bool]:
+                    serialize = quantity_type
 
-                    else:
-                        raise MetainfoError(
-                            'Do not know how to serialize data with type %s for quantity %s' %
-                            (quantity.type, quantity))
+                elif type(quantity_type) == np.dtype:
+                    pass
+
+                elif isinstance(quantity_type, MEnum):
+                    pass
+
+                elif quantity_type == Any:
+                    def _serialize(value: Any):
+                        if type(value) not in [str, int, float, bool, list, type(None)]:
+                            raise MetainfoError(
+                                'Only python primitives are allowed for Any typed non '
+                                'virtual quantities: %s of quantity %s in section %s' %
+                                (value, quantity, self))
+
+                        return value
 
+                    serialize = _serialize
+
+                else:
+                    raise MetainfoError(
+                        'Do not know how to serialize data with type %s for quantity %s' %
+                        (quantity_type, quantity))
+
+                if is_set:
                     value = cast(MDataDict, self.m_data).dct[name]
+                else:
+                    value = quantity.default
 
-                    if type(quantity.type) == np.dtype:
-                        serializable_value = value.tolist()
+                if type(quantity_type) == np.dtype:
+                    serializable_value = value.tolist()
 
+                else:
+                    if len(quantity.shape) == 0:
+                        serializable_value = serialize(value)
+                    elif len(quantity.shape) == 1:
+                        serializable_value = [serialize(i) for i in value]
                     else:
-                        if len(quantity.shape) == 0:
-                            serializable_value = serialize(value)
-                        elif len(quantity.shape) == 1:
-                            serializable_value = [serialize(i) for i in value]
-                        else:
-                            raise NotImplementedError('Higher shapes (%s) not supported: %s' % (quantity.shape, quantity))
+                        raise NotImplementedError('Higher shapes (%s) not supported: %s' % (quantity.shape, quantity))
 
-                    yield name, serializable_value
+                yield name, serializable_value
 
             # sub sections
             for name, sub_section_def in self.m_def.all_sub_sections.items():
                 if sub_section_def.repeats:
                     if self.m_sub_section_count(sub_section_def) > 0:
                         yield name, [
-                            item.m_to_dict()
+                            item.m_to_dict(with_meta=with_meta, include_defaults=include_defaults)
                             for item in self.m_get_sub_sections(sub_section_def)]
                 else:
                     sub_section = self.m_get_sub_section(sub_section_def, -1)
                     if sub_section is not None:
-                        yield name, sub_section.m_to_dict()
+                        yield name, sub_section.m_to_dict(with_meta=with_meta, include_defaults=include_defaults)
 
         return {key: value for key, value in items()}
 
     @classmethod
     def m_from_dict(cls: Type[MSectionBound], dct: Dict[str, Any]) -> MSectionBound:
-        """ Creates a section from the given serializable data dictionary.
+        ''' Creates a section from the given serializable data dictionary.
 
         This is the 'opposite' of :func:`m_to_dict`. It takes a deserialized dict, e.g.
         loaded from JSON, and turns it into a proper section, i.e. an instance of the
         given section class.
-        """
+        '''
 
         section_def = cls.m_def
 
@@ -1173,11 +1252,11 @@ class MSection(metaclass=MObjectMeta):
         return section
 
     def m_to_json(self, **kwargs):
-        """ Returns the data of this section as a json string. """
+        ''' Returns the data of this section as a json string. '''
         return json.dumps(self.m_to_dict(), **kwargs)
 
     def m_all_contents(self) -> Iterable['MSection']:
-        """ Returns an iterable over all sub and sub subs sections. """
+        ''' Returns an iterable over all direct and indirect sub sections. '''
         for content in self.m_contents():
             for sub_content in content.m_all_contents():
                 yield sub_content
@@ -1185,7 +1264,7 @@ class MSection(metaclass=MObjectMeta):
             yield content
 
     def m_contents(self) -> Iterable['MSection']:
-        """ Returns an iterable over all direct subs sections. """
+        ''' Returns an iterable over all direct sub-sections. '''
         for sub_section_def in self.m_def.all_sub_sections.values():
             if sub_section_def.repeats:
                 index = 0
@@ -1198,7 +1277,7 @@ class MSection(metaclass=MObjectMeta):
                 yield sub_section
 
     def m_path(self, quantity_def: 'Quantity' = None) -> str:
-        """ Returns the path of this section or the given quantity within the section hierarchy. """
+        ''' Returns the path of this section or the given quantity within the section hierarchy. '''
         if self.m_parent is None:
             return '/'
 
@@ -1213,19 +1292,21 @@ class MSection(metaclass=MObjectMeta):
         return '%s/%s' % (self.m_parent.m_path().rstrip('/'), segment)
 
     def m_root(self, cls: Type[MSectionBound] = None) -> MSectionBound:
-        """ Returns the first parent of the parent section that has no parent; the root. """
+        ''' Returns the first ancestor section that has no parent, i.e. the root. '''
         if self.m_parent is None:
             return cast(MSectionBound, self)
         else:
             return self.m_parent.m_root(cls)
 
     def m_parent_as(self, cls: Type[MSectionBound] = None) -> MSectionBound:
-        """ Returns the parent section with the given section class type. """
+        ''' Returns the parent section with the given section class type. '''
         return cast(MSectionBound, self.m_parent)
 
     def m_resolve(self, path: str, cls: Type[MSectionBound] = None) -> MSectionBound:
-        """ Resolves the given path using this section as context. """
-
+        '''
+        Resolves the given path or dotted quantity name using this section as context and
+        returns the sub_section or value.
+        '''
         if path.startswith('/'):
             context: 'MSection' = self.m_root()
         else:
@@ -1233,7 +1314,7 @@ class MSection(metaclass=MObjectMeta):
 
         path_stack = path.strip('/').split('/')
         path_stack.reverse()
-        while len(path_stack) > 1:
+        while len(path_stack) > 0:
             prop_name = path_stack.pop()
             prop_def = context.m_def.all_properties.get(prop_name, None)
 
@@ -1275,7 +1356,7 @@ class MSection(metaclass=MObjectMeta):
         return cast(MSectionBound, context)
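To illustrate the extended resolution semantics, a short sketch with hypothetical sections; m_create is assumed to take the section class and return the created sub-section:

    from nomad.metainfo import MSection, Quantity, SubSection

    class Method(MSection):
        basis_set = Quantity(type=str)

    class Run(MSection):
        program_name = Quantity(type=str)
        method = SubSection(sub_section=Method.m_def)

    run = Run()
    run.program_name = 'VASP'
    run.m_create(Method).basis_set = 'plane waves'

    assert run.m_resolve('method/basis_set') == 'plane waves'
    assert run.method.m_resolve('/program_name') == 'VASP'  # a leading '/' resolves from the root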
 
     def m_x(self, key: str, default=None):
-        """ Convinience method for get the annotation with name ``key``. """
+        ''' Convenience method to get the annotation with name ``key``. '''
         return self.m_annotations.get(key, default)
 
     def __validate_shape(self, quantity_def: 'Quantity', value):
@@ -1301,7 +1382,7 @@ class MSection(metaclass=MObjectMeta):
         return True
 
     def m_validate(self):
-        """ Evaluates all constraints and shapes of this section and returns a list of errors. """
+        ''' Evaluates all constraints and shapes of this section and returns a list of errors. '''
         errors: List[str] = []
         for constraint_name in self.m_def.constraints:
             constraint = getattr(self, 'c_%s' % constraint_name, None)
@@ -1327,7 +1408,7 @@ class MSection(metaclass=MObjectMeta):
         return errors
 
     def m_all_validate(self):
-        """ Evaluates all constraints in the whole section hierarchy, incl. this section. """
+        ''' Evaluates all constraints in the whole section hierarchy, incl. this section. '''
         errors: List[str] = []
         for section in itertools.chain([self], self.m_all_contents()):
             for error in section.m_validate():
@@ -1347,6 +1428,16 @@ class MSection(metaclass=MObjectMeta):
 
         return '%s:%s' % (name, m_section_name)
 
+    def __getitem__(self, key):
+        key = key.replace('.', '/')
+        return self.m_resolve(key)
+
+    def __iter__(self):
+        return self.m_def.all_properties.__iter__()
+
+    def __len__(self):
+        return len(self.m_def.all_properties)
+
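The new mapping protocol lets sections be read like dicts over their properties; a sketch reusing the hypothetical Run section from the m_resolve example above:

    assert run['program_name'] == 'VASP'              # __getitem__ delegates to m_resolve
    assert run['method.basis_set'] == 'plane waves'   # dots are translated into path segments
    assert 'program_name' in list(run)                # __iter__ yields all property names
    assert len(run) == len(Run.m_def.all_properties)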
 
 class MCategory(metaclass=MObjectMeta):
 
@@ -1374,7 +1465,7 @@ class MCategory(metaclass=MObjectMeta):
 # Metainfo M3 (i.e. definitions of definitions)
 
 class Definition(MSection):
-    """ A common base for all metainfo definitions.
+    ''' A common base for all metainfo definitions.
 
     All metainfo `definitions` (sections, quantities, sub-sections, packages, ...) share
     some common attributes. These are defined in a common base: all
@@ -1403,7 +1494,7 @@ class Definition(MSection):
     Additional helper functions for `definitions`:
 
     .. automethod:: all_definitions
-    """
+    '''
 
     __all_definitions: Dict[Type[MSection], List[MSection]] = {}
 
@@ -1423,11 +1514,11 @@ class Definition(MSection):
 
     @classmethod
     def all_definitions(cls: Type[MSectionBound]) -> Iterable[MSectionBound]:
-        """ Class method that returns all definitions of this class.
+        ''' Class method that returns all definitions of this class.
 
         This can be used to get a list of all globally available `definitions` of a certain
         kind. E.g. to get all `quantities`: ``Quantity.all_definitions()``.
-        """
+        '''
         return cast(Iterable[MSectionBound], Definition.__all_definitions.get(cls, []))
 
     def qualified_name(self):
@@ -1449,11 +1540,14 @@ class Definition(MSection):
 
 
 class Property(Definition):
-    pass
+
+    def __init_property__(self):
+        ''' Called during section initialisation to allow property-specific initialisation. '''
+        pass
 
 
 class Quantity(Property):
-    """ Definition of an atomic piece of data.
+    ''' Definition of an atomic piece of data.
 
     Quantity definitions are the main building block of meta-info schemas. Each quantity
     represents a single piece of data.
@@ -1551,7 +1645,7 @@ class Quantity(Property):
 
         is_scalar:
             Derived quantity that is True, iff this quantity has shape of length 0
-        """
+        '''
 
     type: 'Quantity' = None
     shape: 'Quantity' = None
@@ -1564,6 +1658,10 @@ class Quantity(Property):
 
     # TODO derived_from = Quantity(type=Quantity, shape=['0..*'])
 
+    def __init_property__(self):
+        if self.derived is not None:
+            self.virtual = True
+
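A sketch of a derived quantity; the signature of the derived callable is an assumption based on how such callables are typically used:

    class System(MSection):
        atom_labels = Quantity(type=str, shape=['0..*'])
        n_atoms = Quantity(
            type=int,
            # __init_property__ marks this quantity as virtual because derived is set
            derived=lambda system: len(system.atom_labels))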
     def __get__(self, obj, cls):
         if obj is None:
             # class (def) attribute case
@@ -1610,7 +1708,7 @@ class Quantity(Property):
 
 
 class DirectQuantity(Quantity):
-    """ Used for quantities that would cause indefinite loops due to bootstrapping. """
+    ''' Used for quantities that would cause infinite loops due to bootstrapping. '''
 
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
@@ -1637,7 +1735,7 @@ class DirectQuantity(Quantity):
 
 
 class SubSection(Property):
-    """ Defines what sections can appear as sub-sections of another section.
+    ''' Defines what sections can appear as sub-sections of another section.
 
     Like quantities, sub-sections are defined in a `section class` as attributes
     of this class. And like quantities, each sub-section definition becomes a property of
@@ -1656,7 +1754,7 @@ class SubSection(Property):
 
         repeats: A boolean that determines whether this sub-section can appear multiple
             times in the parent section.
-    """
+    '''
 
     sub_section: 'Quantity' = None
     repeats: 'Quantity' = None
@@ -1674,14 +1772,24 @@ class SubSection(Property):
                 return obj.m_get_sub_section(self, -1)
 
     def __set__(self, obj, value):
-        raise NotImplementedError('Sub sections cannot be set directly. Use m_create.')
+        if obj is None:
+            raise NotImplementedError()
+
+        if self.repeats:
+            raise NotImplementedError('Cannot set a repeating sub-section; use m_create or m_add_sub_section.')
+
+        else:
+            if value is None:
+                obj.m_remove_sub_section(self, -1)
+            else:
+                obj.m_add_sub_section(self, value)
 
     def __delete__(self, obj):
         raise NotImplementedError('Deleting sub sections is not supported.')
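With this change, non-repeating sub-sections can be assigned like plain attributes; a sketch with the hypothetical Run and Method classes from the m_resolve example above:

    run = Run()
    run.method = Method(basis_set='gaussians')  # forwards to m_add_sub_section
    run.method = None                           # removes the sub-section again
    # assigning to a repeating sub-section still raises NotImplementedError;
    # use m_create or m_add_sub_section instead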
 
 
 class Section(Definition):
+    ''' Sections define blocks of related quantities and allow hierarchical data.
+    ''' Sections define blocks of related quantities and allows hierarchical data.
 
     Section definitions determine what quantities and sub-sections can appear in a
     following section instance.
@@ -1756,7 +1864,7 @@ class Section(Definition):
         parent_section_sub_section_defs:
             A helper attribute that gives all sub-section definitions that this section
             is used in.
-    """
+    '''
 
     section_cls: Type[MSection] = None
 
@@ -1820,7 +1928,7 @@ class Section(Definition):
 
 
 class Package(Definition):
-    """ Packages organize metainfo defintions alongside Python modules
+    ''' Packages organize metainfo definitions alongside Python modules
 
 Each Python module with metainfo definitions (explicitly or implicitly) has a member
     ``m_package`` with an instance of this class. Definitions (categories, sections) in
@@ -1843,7 +1951,7 @@ class Package(Definition):
 
         all_definitions: A helper attribute that provides all section definitions
             by name.
-    """
+    '''
 
     section_definitions: 'SubSection' = None
     category_definitions: 'SubSection' = None
@@ -1874,7 +1982,7 @@ class Package(Definition):
 
 
 class Category(Definition):
-    """ Categories allow to organize metainfo definitions (not metainfo data like sections do)
+    ''' Categories allow to organize metainfo definitions (not metainfo data like sections do)
 
     Each definition, including categories themselves, can belong to a set of categories.
     Categories therefore form a hierarchy of concepts that definitions can belong to, i.e.
@@ -1883,7 +1991,7 @@ class Category(Definition):
     Args:
         definitions: A helper attribute that gives all definitions that are directly or
             indirectly in this category.
-    """
+    '''
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
@@ -1955,7 +2063,7 @@ SubSection.__init_cls__()
 
 
 class Environment(MSection):
-    """ Environments allow to manage many metainfo packages and quickly access all definitions.
+    ''' Environments allow to manage many metainfo packages and quickly access all definitions.
 
     Environments provide a name-table for large-sets of metainfo definitions that span
     multiple packages. It provides various functions to resolve metainfo definitions by
@@ -1963,7 +2071,7 @@ class Environment(MSection):
 
     Args:
         packages: Packages in this environment.
-    """
+    '''
 
     packages = SubSection(sub_section=Package, repeats=True)
 
diff --git a/nomad/metainfo/mongoengine.py b/nomad/metainfo/mongoengine.py
index 47455a28d6..020704f9db 100644
--- a/nomad/metainfo/mongoengine.py
+++ b/nomad/metainfo/mongoengine.py
@@ -12,14 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 Adds mongoengine support to the metainfo. Allows to create, save, and get metainfo
 sections from mongoengine. Currently no sub-section support. The annotation key is "a_me";
 the annotation object supports the following keys:
 
 - ``primary_key``: *Bool*, renders the quantity to be the primary key.
 - ``index``: *Bool*, adds this quantity to the index
-"""
+'''
 
 from typing import Any, Dict
 import mongoengine as me
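A sketch of the annotation side, using only the two keys documented above; how the annotated section maps to a mongoengine document is handled by the rest of this module:

    from nomad.metainfo import MSection, Quantity

    class User(MSection):
        user_id = Quantity(type=str, a_me=dict(primary_key=True))  # becomes the primary key
        email = Quantity(type=str, a_me=dict(index=True))          # added to the index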
diff --git a/nomad/metainfo/optimade.py b/nomad/metainfo/optimade.py
index 62378b1fab..18d45824c9 100644
--- a/nomad/metainfo/optimade.py
+++ b/nomad/metainfo/optimade.py
@@ -1,11 +1,13 @@
 from ase.data import chemical_symbols
-from elasticsearch_dsl import Keyword, Integer, Float, InnerDoc, Nested
+from elasticsearch_dsl import Keyword, Float, InnerDoc, Nested
 import numpy as np
 
 from . import MSection, Section, Quantity, SubSection, MEnum, units
-from .elastic import elastic_mapping
+from .search import SearchQuantity
 
 
+# TODO move the module
+
 def optimade_links(section: str):
     return [
         'https://github.com/Materials-Consortia/OPTiMaDe/blob/develop/optimade.md#%s' %
@@ -29,11 +31,11 @@ class Optimade():
 
 
 class Species(MSection):
-    """
+    '''
     Used to describe the species of the sites of this structure. Species can be pure
     chemical elements, or virtual-crystal atoms representing a statistical occupation of a
     given site by multiple chemical elements.
-    """
+    '''
 
     m_def = Section(links=optimade_links('h.6.2.13'))
 
@@ -96,13 +98,12 @@ class Species(MSection):
 class OptimadeEntry(MSection):
     m_def = Section(
         links=optimade_links('h.6.2'),
-        a_flask=dict(skip_none=True),
-        a_elastic=dict(type=InnerDoc))
+        a_flask=dict(skip_none=True))
 
     elements = Quantity(
         type=MEnum(chemical_symbols), shape=['1..*'],
         links=optimade_links('h.6.2.1'),
-        a_elastic=dict(type=Keyword),
+        a_search=SearchQuantity(),
         a_optimade=Optimade(query=True, entry=True),
         description='''
             Names of the different elements present in the structure.
@@ -111,7 +112,7 @@ class OptimadeEntry(MSection):
     nelements = Quantity(
         type=int,
         links=optimade_links('h.6.2.2'),
-        a_elastic=dict(type=Integer),
+        a_search=SearchQuantity(),
         a_optimade=Optimade(query=True, entry=True),
         description='''
             Number of different elements in the structure as an integer.
@@ -120,7 +121,7 @@ class OptimadeEntry(MSection):
     elements_ratios = Quantity(
         type=float, shape=['nelements'],
         links=optimade_links('h.6.2.3'),
-        a_elastic=dict(type=lambda: Nested(ElementRatio), mapping=ElementRatio.from_structure_entry),
+        a_search=SearchQuantity(es_mapping=Nested(ElementRatio), es_value=ElementRatio.from_structure_entry),
         a_optimade=Optimade(query=True, entry=True),
         description='''
             Relative proportions of different elements in the structure.
@@ -129,7 +130,7 @@ class OptimadeEntry(MSection):
     chemical_formula_descriptive = Quantity(
         type=str,
         links=optimade_links('h.6.2.4'),
-        a_elastic=dict(type=Keyword),
+        a_search=SearchQuantity(),
         a_optimade=Optimade(query=True, entry=True),
         description='''
             The chemical formula for a structure as a string in a form chosen by the API
@@ -139,7 +140,7 @@ class OptimadeEntry(MSection):
     chemical_formula_reduced = Quantity(
         type=str,
         links=optimade_links('h.6.2.5'),
-        a_elastic=dict(type=Keyword),
+        a_search=SearchQuantity(),
         a_optimade=Optimade(query=True, entry=True),
         description='''
             The reduced chemical formula for a structure as a string with element symbols and
@@ -149,7 +150,7 @@ class OptimadeEntry(MSection):
     chemical_formula_hill = Quantity(
         type=str,
         links=optimade_links('h.6.2.6'),
-        a_elastic=dict(type=Keyword),
+        a_search=SearchQuantity(),
         a_optimade=Optimade(query=True, entry=False),
         description='''
             The chemical formula for a structure in Hill form with element symbols followed by
@@ -159,7 +160,7 @@ class OptimadeEntry(MSection):
     chemical_formula_anonymous = Quantity(
         type=str,
         links=optimade_links('h.6.2.7'),
-        a_elastic=dict(type=Keyword),
+        a_search=SearchQuantity(),
         a_optimade=Optimade(query=True, entry=True),
         description='''
             The anonymous formula is the chemical_formula_reduced, but where the elements are
@@ -171,7 +172,7 @@ class OptimadeEntry(MSection):
     dimension_types = Quantity(
         type=int, shape=[3],
         links=optimade_links('h.6.2.8'),
-        a_elastic=dict(type=Integer, mapping=lambda a: sum(a.dimension_types)),
+        a_search=SearchQuantity(es_value=lambda a: sum(a.dimension_types)),
         a_optimade=Optimade(query=True, entry=True),
         description='''
             List of three integers. For each of the three directions indicated by the three lattice
@@ -201,7 +202,7 @@ class OptimadeEntry(MSection):
     nsites = Quantity(
         type=int,
         links=optimade_links('h.6.2.11'),
-        a_elastic=dict(type=Integer),
+        a_search=SearchQuantity(),
         a_optimade=Optimade(query=True, entry=True), description='''
             An integer specifying the length of the cartesian_site_positions property.
         ''')
@@ -220,7 +221,7 @@ class OptimadeEntry(MSection):
     structure_features = Quantity(
         type=MEnum(['disorder', 'unknown_positions', 'assemblies']), shape=['1..*'],
         links=optimade_links('h.6.2.15'),
-        a_elastic=dict(type=Keyword),
+        a_search=SearchQuantity(),
         a_optimade=Optimade(query=True, entry=True), description='''
             A list of strings that flag which special features are used by the structure.
 
@@ -232,6 +233,3 @@ class OptimadeEntry(MSection):
         ''')
 
     species = SubSection(sub_section=Species.m_def, repeats=True)
-
-
-ESOptimadeEntry = elastic_mapping(OptimadeEntry.m_def, InnerDoc)
diff --git a/nomad/metainfo/search.py b/nomad/metainfo/search.py
new file mode 100644
index 0000000000..45f1d317fa
--- /dev/null
+++ b/nomad/metainfo/search.py
@@ -0,0 +1,116 @@
+from typing import Callable, Any
+
+from nomad import metainfo
+
+
+# TODO multi, split are more flask related
+class SearchQuantity:
+    '''
+    A metainfo quantity annotation class that defines additional properties that determine
+    how to search for the respective quantity. Only quantities that have this will
+    be mapped to elastic search.
+
+    Attributes:
+        name: The name of this search quantity. Will be the name in the elastic index and
+            the name for the search parameter. Default is the metainfo quantity name.
+        many_or: Indicates that an 'or' (es terms) search is performed if many values are
+            given; otherwise an 'and' (es bool->should->match) is performed. The value
+            ('split' or 'append') indicates how URL search parameters are treated.
+        many_and: Indicates that many values can be supplied for the search. The value
+            ('split' or 'append') indicates how URL search parameters are treated.
+        order_default: Indicates that this quantity is used to order search results
+            if no other ordering was specified.
+        metric: Indicates that this quantity can be used to build statistics. Statistics
+            provide a metric value for each value of the quantity, e.g. the number of
+            datasets with a given atom label. The value needs to be a valid Elasticsearch
+            aggregation (e.g. sum, cardinality, etc.).
+        metric_name: If this quantity is indicated to function as a metric, the metric
+            needs a name. By default the quantity's name is used.
+        default_statistic: Indicates that this quantity is part of the default statistics.
+        statistic_size:
+            The maximum number of values in a statistic. Default is 10.
+        group: Indicates that this quantity can be used to group results. The value will
+            be the name of the group.
+        es_quantity: The quantity in the elastic mapping that is used to search. This is
+            especially useful if the quantity represents an inner document and only one
+            quantity of this inner object is used. Default is the name of the quantity.
+        es_mapping: A valid elasticsearch_dsl mapping. Default is ``Keyword()``.
+        es_value: A callable that is applied to the section to get a value for this
+            quantity in the elastic index.
+        derived: A callable that is applied to search parameter values before search.
+    '''
+
+    def __init__(
+            self,
+            name: str = None, description: str = None,
+            many_and: str = None, many_or: str = None,
+            order_default: bool = False,
+            group: str = None, metric: str = None, metric_name: str = None,
+            default_statistic: bool = False,
+            statistic_size: int = 10,
+            es_quantity: str = None,
+            es_mapping: Any = None,
+            es_value: Callable[[Any], Any] = None,
+            derived: Callable[[Any], Any] = None):
+
+        self.name = name
+        self.description = description
+        self.many_and = many_and
+        self.many_or = many_or
+        self.order_default = order_default
+        self.group = group
+        self.default_statistic = default_statistic
+        self.metric = metric
+        self.metric_name = metric_name
+        self.statistic_size = statistic_size
+        self.es_quantity = es_quantity
+        self.es_mapping = es_mapping
+        self.es_value = es_value
+        self.derived = derived
+
+        self.prefix: str = None
+        self.qualified_name: str = None
+
+        assert many_and is None or many_or is None, 'Only one of many_and and many_or can be set'
+        assert many_and in [None, 'split', 'append'], 'Only split and append are valid values'
+        assert many_or in [None, 'split', 'append'], 'Only split and append are valid values'
+
+    def configure(self, quantity: metainfo.Quantity, prefix: str = None):
+        if self.name is None:
+            self.name = quantity.name
+
+        if self.description is None:
+            self.description = quantity.description
+
+        if prefix is not None:
+            self.qualified_name = '%s.%s' % (prefix, self.name)
+            if self.es_quantity is not None:
+                self.es_quantity = '%s.%s' % (prefix, self.es_quantity)
+            if self.metric_name is not None:
+                self.metric_name = '%s.%s' % (prefix, self.metric_name)
+            if self.group is not None:
+                self.group = '%s.%s' % (prefix, self.group)
+        else:
+            self.qualified_name = self.name
+
+        if self.es_quantity is None:
+            self.es_quantity = self.qualified_name
+        if self.metric_name is None and self.metric is not None:
+            self.metric_name = self.qualified_name
+
+    @property
+    def argparse_action(self):
+        if self.many_or is not None:
+            return self.many_or
+
+        if self.many_and is not None:
+            return self.many_and
+
+        return None
+
+    @property
+    def many(self):
+        return self.many_and is not None or self.many_or is not None
+
+
+def init(section: metainfo.MSection):
+    pass
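A sketch of how a SearchQuantity annotation might be configured against its quantity (section class and prefix hypothetical):

    from nomad import metainfo
    from nomad.metainfo.search import SearchQuantity

    search = SearchQuantity(metric='cardinality', default_statistic=True)

    class Entry(metainfo.MSection):
        formula = metainfo.Quantity(type=str, a_search=search)

    search.configure(Entry.formula, prefix='dft')
    assert search.qualified_name == 'dft.formula'
    assert search.es_quantity == 'dft.formula'   # defaults to the qualified name
    assert search.metric_name == 'dft.formula'   # a metric without a name gets the qualified name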
diff --git a/nomad/normalizing/__init__.py b/nomad/normalizing/__init__.py
index 20502f9951..cd527d48da 100644
--- a/nomad/normalizing/__init__.py
+++ b/nomad/normalizing/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 After parsing calculations have to be normalized with a set of *normalizers*.
 In NOMAD-coe those were programmed in python (we'll reuse) and scala (we'll rewrite).
 
@@ -29,7 +29,7 @@ There is one ABC for all normalizer:
 
 .. autoclass::nomad.normalizing.normalizer.Normalizer
     :members:
-"""
+'''
 
 from typing import List, Any, Iterable, Type
 
diff --git a/nomad/normalizing/data/springer_msgpack.py b/nomad/normalizing/data/springer_msgpack.py
index b0590f20da..9a9bfdda45 100644
--- a/nomad/normalizing/data/springer_msgpack.py
+++ b/nomad/normalizing/data/springer_msgpack.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 Generates and queries a msgpack database of springer-related quantities downloaded from
 http://materials.springer.com. The database is structured as
 
@@ -21,7 +21,7 @@ space_group_number : normalized_formula : springer_id : entry
 The msgpack file can be queried using ArchiveFileDB.
 
 The html parser was taken from a collection of scripts from FHI without further testing.
-"""
+'''
 
 import requests
 import re
@@ -89,9 +89,9 @@ def normalize_formula(formula_str: str) -> str:
 
 
 def parse(htmltext: str) -> Dict[str, str]:
-    """
+    '''
     Parses the quantities in required_items from an html text.
-    """
+    '''
     soup = BeautifulSoup(htmltext, "html.parser")
     results = {}
     for item in soup.find_all(attrs={"class": "data-list__item"}):
@@ -158,10 +158,10 @@ def _download(path: str, max_n_query: int = 10) -> str:
 
 
 def download_springer_data(max_n_query: int = 10):
-    """
+    '''
     Downloads the springer quantities related to a structure from springer and updates
     database.
-    """
+    '''
     # load database
     # querying the database with an unavailable dataset leads to an error,
     # get toc keys first by making an empty query
@@ -219,9 +219,9 @@ def download_springer_data(max_n_query: int = 10):
 
 
 def query_springer_data(normalized_formula: str, space_group_number: int) -> Dict[str, Any]:
-    """
+    '''
     Queries a msgpack database for springer-related quantities.
-    """
+    '''
     entries = query_archive(DB_NAME, {str(space_group_number): {normalized_formula: '*'}})
     db_dict = {}
     entries = entries.get(str(space_group_number), {}).get(normalized_formula, {})
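Minimal usage sketch (formula and space group values hypothetical):

    springer_data = query_springer_data('Ga50In50', 216)  # returns a Dict[str, Any] of stored entries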
diff --git a/nomad/normalizing/normalizer.py b/nomad/normalizing/normalizer.py
index 2ca1efbf90..ea0d70f2bf 100644
--- a/nomad/normalizing/normalizer.py
+++ b/nomad/normalizing/normalizer.py
@@ -20,16 +20,16 @@ from nomad.utils import get_logger
 
 
 class Normalizer(metaclass=ABCMeta):
-    """
+    '''
     A base class for normalizers. Normalizers work on a :class:`AbstractParserBackend` instance
     for read and write. Normalizer instances are reused.
 
     Arguments:
         backend: The backend used to read and write data from and to.
-    """
+    '''
 
     domain = 'dft'
-    """ The domain this normalizer should be used in. Default for all normalizer is 'DFT'. """
+    ''' The domain this normalizer should be used in. Default for all normalizers is 'dft'. '''
 
     def __init__(self, backend: AbstractParserBackend) -> None:
         self._backend = backend
@@ -42,7 +42,7 @@ class Normalizer(metaclass=ABCMeta):
 
 
 class SystemBasedNormalizer(Normalizer, metaclass=ABCMeta):
-    """
+    '''
     A normalizer base class for normalizers that only touch a section_system.
 
     The normalizer is run on all section systems in a run. However, some systems,
@@ -51,7 +51,7 @@ class SystemBasedNormalizer(Normalizer, metaclass=ABCMeta):
 
     Args:
         only_representatives: Will only normalize the `representative` systems.
-    """
+    '''
     def __init__(self, backend: AbstractParserBackend, only_representatives: bool = False):
         super().__init__(backend)
         self.only_representatives = only_representatives
@@ -78,15 +78,15 @@ class SystemBasedNormalizer(Normalizer, metaclass=ABCMeta):
 
     @abstractmethod
     def normalize_system(self, section_system_index: int, is_representative: bool) -> bool:
-        """ Normalize the given section and returns True, iff successful"""
+        ''' Normalizes the given section and returns True iff successful. '''
         pass
 
     def __representative_system(self):
-        """Used to select a representative system for this entry.
+        '''Used to select a representative system for this entry.
 
         Attempt to find a single section_system that is representative for the
         entry. The selection depends on the type of calculation.
-        """
+        '''
         # Try to find a frame sequence, only first found is considered
         try:
             frame_seq = self._backend['section_frame_sequence'][0]
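For orientation, a sketch of a concrete subclass (class name hypothetical; the signatures are the ones shown above):

    class MySystemNormalizer(SystemBasedNormalizer):

        def __init__(self, backend):
            super().__init__(backend, only_representatives=True)

        def normalize_system(self, section_system_index, is_representative):
            # read from and write to self._backend here
            return True  # True iff the normalization was successful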
diff --git a/nomad/normalizing/optimade.py b/nomad/normalizing/optimade.py
index 114b206414..d0ddf643ac 100644
--- a/nomad/normalizing/optimade.py
+++ b/nomad/normalizing/optimade.py
@@ -28,19 +28,19 @@ species_re = re.compile(r'^([A-Z][a-z]?)(\d*)$')
 
 class OptimadeNormalizer(SystemBasedNormalizer):
 
-    """
+    '''
     This normalizer produces a section with all the data necessary for the Optimade API.
     It assumes that the :class:`SystemNormalizer` was run before.
-    """
+    '''
     def __init__(self, backend):
         super().__init__(backend, only_representatives=True)
 
     def get_optimade_data(self, index) -> OptimadeEntry:
-        """
+        '''
         The 'main' method of this :class:`SystemBasedNormalizer`.
         Normalizes the section with the given `index`.
     Normalizes geometry, classifies the system type, and runs symmetry analysis.
-        """
+        '''
         optimade = OptimadeEntry()
 
         def get_value(key: str, default: Any = None, numpy: bool = False, unit=None) -> Any:
diff --git a/nomad/normalizing/structure.py b/nomad/normalizing/structure.py
index 5632d16f11..b1661c5661 100644
--- a/nomad/normalizing/structure.py
+++ b/nomad/normalizing/structure.py
@@ -32,7 +32,7 @@ if old_symmetry_tolerance != symmetry_tolerance:
 
 
 def get_normalized_wyckoff(atomic_numbers: np.array, wyckoff_letters: np.array) -> Dict[str, Dict[str, int]]:
-    """Returns a normalized Wyckoff sequence for the given atomic numbers and
+    '''Returns a normalized Wyckoff sequence for the given atomic numbers and
     corresponding wyckoff letters. In a normalized sequence the chemical
     species are "anonymized" by replacing them with upper case alphabets.
 
@@ -45,7 +45,7 @@ def get_normalized_wyckoff(atomic_numbers: np.array, wyckoff_letters: np.array)
         dictionary. The dictionary contains the number of atoms for each
         species, where the species names have been anonymized in the form
         "X_<index>".
-    """
+    '''
     # Count the occurrence of each chemical species
     atom_count: Dict[int, int] = {}
     for atomic_number in atomic_numbers:
@@ -106,7 +106,7 @@ def get_normalized_wyckoff(atomic_numbers: np.array, wyckoff_letters: np.array)
 
 
 def search_aflow_prototype(space_group: int, norm_wyckoff: dict) -> dict:
-    """Searches the AFLOW prototype library for a match for the given space
+    '''Searches the AFLOW prototype library for a match for the given space
     group and normalized Wyckoff sequence. The normalized Wyckoff sequence is
     assumed to come from the MatID symmetry routine.
 
@@ -121,7 +121,7 @@ def search_aflow_prototype(space_group: int, norm_wyckoff: dict) -> dict:
 
     Returns:
         Dictionary containing the AFLOW prototype information.
-    """
+    '''
     structure_type_info = None
     type_descriptions = aflow_prototypes["prototypes_by_spacegroup"].get(space_group, [])
     for type_description in type_descriptions:
diff --git a/nomad/normalizing/system.py b/nomad/normalizing/system.py
index e91043ab88..eaca47f82f 100644
--- a/nomad/normalizing/system.py
+++ b/nomad/normalizing/system.py
@@ -40,10 +40,10 @@ springer_db_connection = None
 
 
 def open_springer_database():
-    """
+    '''
     Create a global connection to the Springer database in a way that
     each worker opens the database just once.
-    """
+    '''
     global springer_db_connection
     if springer_db_connection is None:
         # filepath definition in 'nomad-FAIR/nomad/config.py'
@@ -59,22 +59,22 @@ def open_springer_database():
 
 
 def normalized_atom_labels(atom_labels):
-    """
+    '''
     Normalizes the given atom labels: they either are labels right away, or contain
     additional numbers (to distinguish same species but different labels, see meta-info),
     or we replace them with the ase placeholder atom 'X' for unknown elements.
-    """
+    '''
     return [
         ase.data.chemical_symbols[0] if match is None else match.group(0)
         for match in [re.search(atom_label_re, atom_label) for atom_label in atom_labels]]
 
 
 def formula_normalizer(atoms):
-    """
+    '''
     Reads the chemical symbols in ase.atoms and returns a normalized formula.
     Formula normalization is on the basis of atom counting,
     e.g., Tc ->  Tc100, SZn -> S50Zn50, Co2Nb -> Co67Nb33
-    """
+    '''
     #
     chem_symb = atoms.get_chemical_symbols()
     atoms_counter = Counter(chem_symb)  # dictionary
@@ -91,10 +91,10 @@ def formula_normalizer(atoms):
 
 class SystemNormalizer(SystemBasedNormalizer):
 
-    """
+    '''
     This normalizer performs all system (atoms, cells, etc.) related normalizations
     of the legacy NOMAD-coe *stats* normalizer.
-    """
+    '''
 
     @staticmethod
     def atom_label_to_num(atom_label):
@@ -109,13 +109,13 @@ class SystemNormalizer(SystemBasedNormalizer):
         return 0
 
     def normalize_system(self, index, is_representative) -> bool:
-        """
+        '''
         The 'main' method of this :class:`SystemBasedNormalizer`.
         Normalizes the section with the given `index`.
         Normalizes geometry, classifies the system type, and runs symmetry analysis.
 
         Returns: True, iff the normalization was successful
-        """
+        '''
 
         def get_value(key: str, default: Any = None, numpy: bool = True) -> Any:
             try:
@@ -262,13 +262,13 @@ class SystemNormalizer(SystemBasedNormalizer):
         return True
 
     def system_type_analysis(self, atoms: Atoms) -> None:
-        """
+        '''
         Determine the system type with MatID. Write the system type to the
         backend.
 
         Args:
             atoms: The structure to analyse
-        """
+        '''
         system_type = config.services.unavailable_value
         if atoms.get_number_of_atoms() <= config.normalize.system_classification_with_clusters_threshold:
             try:
@@ -297,7 +297,7 @@ class SystemNormalizer(SystemBasedNormalizer):
         self._backend.addValue('system_type', system_type)
 
     def symmetry_analysis(self, atoms) -> None:
-        """Analyze the symmetry of the material being simulated.
+        '''Analyze the symmetry of the material being simulated.
 
         We feed the parsed values in section_system to the
         symmetry analyzer. We then use the Matid library
@@ -312,7 +312,7 @@ class SystemNormalizer(SystemBasedNormalizer):
         Returns:
             None: The method should write symmetry variables
             to the backend which is member of this class.
-        """
+        '''
         # Try to use Matid's symmetry analyzer to analyze the ASE object.
         try:
             symm = SymmetryAnalyzer(atoms, symmetry_tol=config.normalize.symmetry_tolerance)
@@ -410,7 +410,7 @@ class SystemNormalizer(SystemBasedNormalizer):
 
             # SQL QUERY
             # (this replaces the four queries done in the old 'classify4me_SM_normalizer.py')
-            cur.execute("""
+            cur.execute('''
                 SELECT
                     entry.entry_id,
                     entry.alphabetic_formula,
@@ -425,7 +425,7 @@ class SystemNormalizer(SystemBasedNormalizer):
                 LEFT JOIN reference ON reference.reference_nr = er.entry_nr
                 WHERE entry.normalized_formula = ( %r ) and entry.space_group_number = '%d'
                 GROUP BY entry.entry_id;
-                """ % (normalized_formula, space_group_number))
+                ''' % (normalized_formula, space_group_number))
 
             results = cur.fetchall()
             # 'results' is a list of tuples, i.e. '[(a,b,c,d), ..., (a,b,c,d)]'
@@ -487,14 +487,14 @@ class SystemNormalizer(SystemBasedNormalizer):
                     self.logger.warning('Mismatch in Springer classification or compounds')
 
     def prototypes(self, atom_species: np.array, wyckoffs: np.array, spg_number: int) -> None:
-        """Tries to match the material to an entry in the AFLOW prototype data.
+        '''Tries to match the material to an entry in the AFLOW prototype data.
         If a match is found, a section_prototype is added to section_system.
 
         Args:
             atomic_numbers: Array of atomic numbers.
             wyckoff_letters: Array of Wyckoff letters as strings.
             spg_number: Space group number.
-        """
+        '''
         norm_wyckoff = structure.get_normalized_wyckoff(atom_species, wyckoffs)
         protoDict = structure.search_aflow_prototype(spg_number, norm_wyckoff)
         if protoDict is not None:
diff --git a/nomad/parsing/__init__.py b/nomad/parsing/__init__.py
index 4144ce44a4..7327895df2 100644
--- a/nomad/parsing/__init__.py
+++ b/nomad/parsing/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 The *parsing* module is an interface for the existing NOMAD-coe parsers.
 This module redefines some of the old NOMAD-coe python-common functionality to create a
 more coherent interface to the parsers.
@@ -69,7 +69,7 @@ based on NOMAD-coe's *python-common* module.
     :members:
 .. autoclass:: nomad.parsing.LocalBackend
     :members:
-"""
+'''
 
 from typing import Callable, IO, Union, Dict
 import magic
@@ -96,7 +96,7 @@ encoding_magic = magic.Magic(mime_encoding=True)
 
 
 def match_parser(mainfile: str, upload_files: Union[str, files.StagingUploadFiles], strict=True) -> 'Parser':
-    """
+    '''
     Performs parser matching. This means it takes the given mainfile and potentially
     opens it with the given callback and tries to identify a parser that can parse
     the file.
@@ -111,7 +111,7 @@ def match_parser(mainfile: str, upload_files: Union[str, files.StagingUploadFile
         strict: Only match strict parsers, e.g. no artificial parsers for missing or empty entries.
 
     Returns: The parser, or None if no parser could be matched.
-    """
+    '''
     if mainfile.startswith('.') or mainfile.startswith('~'):
         return None
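Minimal usage sketch (paths hypothetical):

    from nomad.parsing import match_parser

    parser = match_parser('vasprun.xml', upload_files='/path/to/extracted/upload')
    if parser is not None:
        backend = parser.run('/path/to/extracted/upload/vasprun.xml')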
 
@@ -484,7 +484,7 @@ if config.use_empty_parsers:
 
 parsers.append(BrokenParser())
 
-""" Instantiation and constructor based config of all parsers. """
+''' Instantiation and constructor based config of all parsers. '''
 
 parser_dict = {parser.name: parser for parser in parsers}  # type: ignore
-""" A dict to access parsers by name. Usually 'parsers/<...>', e.g. 'parsers/vasp'. """
+''' A dict to access parsers by name. Usually 'parsers/<...>', e.g. 'parsers/vasp'. '''
diff --git a/nomad/parsing/artificial.py b/nomad/parsing/artificial.py
index 969f8f2009..fe85e9bf53 100644
--- a/nomad/parsing/artificial.py
+++ b/nomad/parsing/artificial.py
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
+Parser for creating artificial test, benchmark, and demonstration data.
-"""
+'''
 
 import json
 import os.path
@@ -40,7 +40,7 @@ meta_info_env, _ = loadJsonFile(filePath=meta_info_path, dependencyLoader=None,
 
 
 class ArtificalParser(Parser):
+    ''' Base class for artificial parsers based on VASP metainfo. '''
+    ''' Base class for artifical parsers based on VASP metainfo. '''
     def __init__(self):
         super().__init__()
         self.backend = None
@@ -54,9 +54,9 @@ class ArtificalParser(Parser):
 
 
 class EmptyParser(MatchingParser):
-    """
+    '''
     Implementation that produces an empty code_run
-    """
+    '''
     def run(self, mainfile: str, logger=None) -> LocalBackend:
         backend = LocalBackend(metaInfoEnv=meta_info_env, debug=False)  # type: ignore
         backend.openSection('section_run')
@@ -66,10 +66,10 @@ class EmptyParser(MatchingParser):
 
 
 class TemplateParser(ArtificalParser):
-    """
+    '''
     A parser that generates data based on a template given via the
     mainfile. The template is basically some archive json.
-    """
+    '''
     name = 'parsers/template'
 
     def is_mainfile(
@@ -78,11 +78,11 @@ class TemplateParser(ArtificalParser):
         return filename.endswith('template.json')
 
     def transform_value(self, name, value):
-        """ allow subclasses to modify values """
+        ''' allow subclasses to modify values '''
         return value
 
     def transform_section(self, name, section):
-        """ allow subclasses to modify sections """
+        ''' allow subclasses to modify sections '''
         return section
 
     def add_section(self, section):
@@ -130,7 +130,7 @@ class TemplateParser(ArtificalParser):
 
 
 class ChaosParser(ArtificalParser):
-    """
+    '''
     Parser that emulates typical error situations. Files can contain a json string (or
     object with key `chaos`) with one of the following string values:
     - exit
@@ -139,7 +139,7 @@ class ChaosParser(ArtificalParser):
     - exception
     - segfault
     - random
-    """
+    '''
     name = 'parsers/chaos'
 
     def is_mainfile(
@@ -212,7 +212,7 @@ class GenerateRandomParser(TemplateParser):
         return os.path.basename(filename).startswith('random_')
 
     def transform_section(self, name, section):
-        """ allow subclasses to modify sections """
+        ''' allow subclasses to modify sections '''
         if name == 'section_system':
             atoms = []
             atom_positions = []
diff --git a/nomad/parsing/backend.py b/nomad/parsing/backend.py
index 28b118b10a..0f25a91b54 100644
--- a/nomad/parsing/backend.py
+++ b/nomad/parsing/backend.py
@@ -55,124 +55,124 @@ class WrongContextState(Exception):
 
 
 class AbstractParserBackend(metaclass=ABCMeta):
-    """
+    '''
     This ABC provides the parser backend interface used by the NOMAD-coe parsers
     and normalizers.
-    """
+    '''
     @abstractmethod
     def metaInfoEnv(self):
-        """ Returns the meta info used by this backend. """
+        ''' Returns the meta info used by this backend. '''
         pass
 
     @abstractmethod
     def startedParsingSession(
             self, mainFileUri, parserInfo, parserStatus=None, parserErrors=None):
-        """
+        '''
         Should be called when the parsing starts.
         ParserInfo should be a valid json dictionary.
-        """
+        '''
         pass
 
     @abstractmethod
     def finishedParsingSession(
             self, parserStatus, parserErrors, mainFileUri=None, parserInfo=None,
             parsingStats=None):
-        """ Called when the parsing finishes. """
+        ''' Called when the parsing finishes. '''
         pass
 
     @abstractmethod
     def openContext(self, contextUri: str):
-        """ Open existing archive data to introduce new data into an existing section. """
+        ''' Open existing archive data to introduce new data into an existing section. '''
         pass
 
     @abstractmethod
     def closeContext(self, contextUri: str):
-        """ Close priorly opened existing archive data again. """
+        ''' Close priorly opened existing archive data again. '''
         pass
 
     @abstractmethod
     def openSection(self, metaName, parent_index=-1):
-        """ Opens a new section and returns its new unique gIndex. """
+        ''' Opens a new section and returns its new unique gIndex. '''
         pass
 
     @abstractmethod
     def closeSection(self, metaName, gIndex):
-        """
+        '''
         Closes the section with the given meta name and index. After this, no more
         values can be added to this section.
-        """
+        '''
         pass
 
     @abstractmethod
     def openNonOverlappingSection(self, metaName):
-        """ Opens a new non overlapping section. """
+        ''' Opens a new non overlapping section. '''
         pass
 
     @abstractmethod
     def setSectionInfo(self, metaName, gIndex, references):
-        """
+        '''
         Sets info values of an open section. ``references`` should be a dictionary with the
         gIndexes of the root sections this section refers to.
-        """
+        '''
         pass
 
     @abstractmethod
     def closeNonOverlappingSection(self, metaName):
-        """
+        '''
         Closes the current non overlapping section for the given meta name. After
         this, no more values can be added to this section.
-        """
+        '''
         pass
 
     @abstractmethod
     def openSections(self):
-        """ Returns the sections that are still open as metaName, gIndex tuples. """
+        ''' Returns the sections that are still open as metaName, gIndex tuples. '''
         pass
 
     @abstractmethod
     def addValue(self, metaName, value, gIndex=-1):
-        """
+        '''
         Adds a json value for the given metaName. The gIndex is used to identify
         the right parent section.
-        """
+        '''
         pass
 
     @abstractmethod
     def addRealValue(self, metaName, value, gIndex=-1):
-        """
+        '''
         Adds a float value for the given metaName. The gIndex is used to identify
         the right parent section.
-        """
+        '''
         pass
 
     @abstractmethod
     def addArray(self, metaName, shape, gIndex=-1):
-        """
+        '''
         Adds an uninitialized array of the given shape for the given metaName.
         The gIndex is used to identify the right parent section.
         This is necessary before array values can be set with :func:`setArrayValues`.
-        """
+        '''
 
     @abstractmethod
     def setArrayValues(self, metaName, values, offset=None, gIndex=-1):
-        """
+        '''
         Adds values of the given numpy array to the last array added for the given
         metaName and parent gIndex.
-        """
+        '''
         pass
 
     @abstractmethod
     def addArrayValues(self, metaName, values, gIndex=-1, override: bool = False):
-        """
+        '''
         Adds an array with the given numpy array values for the given metaName and
         parent section gIndex. Override determines whether to rewrite existing values
         in the backend.
-        """
+        '''
         pass
 
     @abstractmethod
     def pwarn(self, msg):
-        """ Used to catch parser warnings. """
+        ''' Used to catch parser warnings. '''
         pass
 
     # The following are extensions to the origin NOMAD-coe parser backend. And allow
@@ -185,34 +185,34 @@ class AbstractParserBackend(metaclass=ABCMeta):
 
     @abstractmethod
     def get_sections(self, meta_name: str, g_index: int = -1) -> List[int]:
-        """ Return all gIndices for existing sections of the given meta_name and parent section index. """
+        ''' Return all gIndices for existing sections of the given meta_name and parent section index. '''
         pass
 
     @abstractmethod
     def get_value(self, metaName: str, g_index=-1) -> Any:
-        """
+        '''
         Return the value set to the given meta_name in its parent section of the given index.
         An index of -1 (default) is only allowed if there is exactly one parent section.
-        """
+        '''
         pass
 
     def write_json(
             self, out: TextIO, pretty=True, filter: Callable[[str, Any], Any] = None,
             root_sections: List[str] = ['section_run', 'section_entry_info']):
-        """ Writes the backend contents. """
+        ''' Writes the backend contents. '''
         pass
 
     def add_mi2_section(self, section: MSection):
-        """ Allows to mix a metainfo2 style section into backend. """
+        ''' Allows to mix a metainfo2 style section into backend. '''
         pass
 
     def get_mi2_section(self, section_def: MI2Section):
-        """ Allows to mix a metainfo2 style section into backend. """
+        ''' Allows to mix a metainfo2 style section into backend. '''
         pass
 
     def traverse(self, *args, **kwargs) -> Iterable[Tuple[str, str, Any]]:
-        """ Traverses the backend data and yiels tuples with metainfo name, event type,
-        and value """
+        ''' Traverses the backend data and yields tuples with metainfo name, event type,
+        and value. '''
         pass
 
 
@@ -222,7 +222,7 @@ class JSONStreamWriter():
     ARRAY = 2
     KEY_VALUE = 3
 
-    """
+    '''
     A generator that allows to output JSON based on calling 'event' functions.
     It is pure Python and could be replaced by some faster implementation, e.g. yajl-py.
     It uses standard json decode to write values. This allows to mix streaming with
@@ -236,7 +236,7 @@ class JSONStreamWriter():
     Raises:
         AssertionError: If methods were called in a non JSON fashion. Call :func:`close`
         to make sure everything was closed properly.
-    """
+    '''
     def __init__(self, file, pretty=False):
         self._fp = file
         self._pretty = pretty
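A usage sketch; open_object and close are taken from this class, while the remaining event methods are assumptions based on the OBJECT/ARRAY/KEY_VALUE constants:

    import sys

    writer = JSONStreamWriter(sys.stdout, pretty=True)
    writer.open_object()
    # ... emit key/value and array events here (method names assumed) ...
    writer.close_object()  # assumed counterpart of open_object
    writer.close()         # asserts that all objects/arrays were closed properly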
@@ -335,10 +335,10 @@ class JSONStreamWriter():
 
 
 class LegacyParserBackend(AbstractParserBackend):
-    """
+    '''
     Partial implementation of :class:`AbstractParserBackend` that implements some
     methods that are independent from the core backend implementation.
-    """
+    '''
     def __init__(self, logger):
         self.logger = logger if logger is not None else get_logger(__name__)
 
@@ -365,10 +365,10 @@ class LegacyParserBackend(AbstractParserBackend):
             self._warnings.append('There are more warnings, check the processing logs.')
 
     def _parse_context_uri(self, context_uri: str) -> Tuple[str, int]:
-        """
+        '''
         Returns the last segment of the given context uri, i.e. the section that
         constitutes the context.
-        """
+        '''
         path_str = re.sub(r'^(nmd://[^/]+/[^/]+)?/', '', context_uri, count=1)
         path = path_str.split('/')[::-1]  # reversed path via extended slice syntax
 
@@ -388,7 +388,7 @@ class LegacyParserBackend(AbstractParserBackend):
 
     @property
     def status(self) -> ParserStatus:
-        """ Returns status and potential errors. """
+        ''' Returns status and potential errors. '''
         return (self._status, self._errors)
 
     def reset_status(self) -> None:
@@ -398,12 +398,12 @@ class LegacyParserBackend(AbstractParserBackend):
 
 
 class LocalBackend(LegacyParserBackend, metaclass=DelegatingMeta):
-    """
+    '''
     This implementation of :class:`AbstractParserBackend` is an extended version of
     NOMAD-coe's ``LocalBackend`` that allows to write the results in an *archive*-style .json.
     It can be used like the original thing, but also allows to output archive JSON
     after parsing via :func:`write_json`.
-    """
+    '''
     def __init__(self, *args, **kwargs):
         logger = kwargs.pop('logger', None)
         super().__init__(logger=logger)
@@ -417,7 +417,7 @@ class LocalBackend(LegacyParserBackend, metaclass=DelegatingMeta):
         return self.data[metaname]
 
     def __getattr__(self, name):
-        """ Support for unimplemented and unexpected methods. """
+        ''' Support for unimplemented and unexpected methods. '''
         if name not in self._known_attributes and self._unknown_attributes.get(name) is None:
             self.logger.debug('Access of unexpected backend attribute/method', attribute=name)
             self._unknown_attributes[name] = name
@@ -425,11 +425,11 @@ class LocalBackend(LegacyParserBackend, metaclass=DelegatingMeta):
         return getattr(self._delegate, name)
 
     def add_mi2_section(self, section: MSection):
-        """ Allows to mix a metainfo2 style section into backend. """
+        ''' Allows to mix a metainfo2 style section into backend. '''
         self.mi2_data[section.m_def.name] = section
 
     def get_mi2_section(self, section_def: MI2Section):
-        """ Allows to mix a metainfo2 style section into backend. """
+        ''' Allows to mix a metainfo2 style section into backend. '''
         return self.mi2_data.get(section_def.name, None)
 
     def finishedParsingSession(self, *args, **kwargs):
@@ -558,7 +558,7 @@ class LocalBackend(LegacyParserBackend, metaclass=DelegatingMeta):
     def write_json(
             self, out: TextIO, pretty=True, filter: Callable[[str, Any], Any] = None,
             root_sections: List[str] = ['section_run', 'section_entry_info']):
-        """
+        '''
         Writes the results stored in the backend after parsing in an 'archive'.json
         style format.
 
@@ -566,7 +566,7 @@ class LocalBackend(LegacyParserBackend, metaclass=DelegatingMeta):
             out: The file-like that is used to write the json to.
             pretty: Format the json or not.
             filter: Optional filter that takes metaname, value pairs and returns a new value.
-        """
+        '''
         json_writer = JSONStreamWriter(out, pretty=pretty)
         json_writer.open_object()
 
diff --git a/nomad/parsing/metainfo.py b/nomad/parsing/metainfo.py
index 618b1bd863..8db70c5b27 100644
--- a/nomad/parsing/metainfo.py
+++ b/nomad/parsing/metainfo.py
@@ -29,7 +29,7 @@ from .backend import LegacyParserBackend
 
 
 class MetainfoBackend(LegacyParserBackend):
-    """ A backend that uses the new metainfo to store all data. """
+    ''' A backend that uses the new metainfo to store all data. '''
 
     def __init__(self, env: LegacyMetainfoEnvironment, logger=None):
         super().__init__(logger=logger)
@@ -69,22 +69,22 @@ class MetainfoBackend(LegacyParserBackend):
         return current
 
     def openContext(self, context_uri: str):
-        """ Open existing archive data to introduce new data into an existing section. """
+        ''' Open existing archive data to introduce new data into an existing section. '''
         resolved = self.resolve_context(context_uri)
         self.open_sections_by_def.setdefault(resolved.m_def, []).append(resolved)
 
     def closeContext(self, context_uri: str):
-        """ Close priorly opened existing archive data again. """
+        ''' Close priorly opened existing archive data again. '''
         resolved = self.resolve_context(context_uri)
         self.open_sections_by_def.setdefault(resolved.m_def, []).remove(resolved)
 
     def openSection(self, name):
-        """
+        '''
         It will assume that there is a sub-section def with the given name.
         It will use the latest opened section of the sub-section's parent as the parent
         for the new section.
         An exception is made for known root sections, e.g. 'section_run'.
-        """
+        '''
         if name in ['section_run', 'section_entry_info']:
             section_def = self.env.resolve_definition(name, Section)
             sub_section = self.resource.create(section_def.section_cls)
@@ -108,7 +108,7 @@ class MetainfoBackend(LegacyParserBackend):
         return sub_section.m_parent_index
 
     def get_open_section_for_quantity(self, name, g_index):
-        """ Returns the open section that contains the quantity of the given name. """
+        ''' Returns the open section that contains the quantity of the given name. '''
         quantity_def = self.env.resolve_definition(name, Quantity)
         section_def = quantity_def.m_parent_as(Section)
         sections = self.open_sections_by_def.get(section_def, [])
@@ -149,10 +149,10 @@ class MetainfoBackend(LegacyParserBackend):
         return self.openSection(metaName)
 
     def setSectionInfo(self, metaName, gIndex, references):
-        """
+        '''
         Sets info values of an open section. ``references`` should be a dictionary with the
         gIndexes of the root sections this section refers to.
-        """
+        '''
         # TODO might be necessary to make references work?
         pass
 
@@ -160,7 +160,7 @@ class MetainfoBackend(LegacyParserBackend):
         return self.closeSection(name, -1)
 
     def openSections(self):
-        """ Returns the sections that are still open as metaName, gIndex tuples. """
+        ''' Returns the sections that are still open as metaName, gIndex tuples. '''
         for section_def, sub_sections in self.open_sections_by_def:
             for sub_section in sub_sections:
                 yield section_def.name, sub_section.m_parent_index
@@ -187,26 +187,26 @@ class MetainfoBackend(LegacyParserBackend):
         self.addValue(name, value, g_index)
 
     def addArray(self, name, shape, g_index=-1):
-        """
+        '''
         Adds an uninitialized array of the given shape for the given metaName.
         The gIndex is used to identify the right parent section.
         This is necessary before array values can be set with :func:`setArrayValues`.
-        """
+        '''
         raise NotImplementedError()
 
     def setArrayValues(self, metaName, values, offset=None, gIndex=-1):
-        """
+        '''
         Adds values of the given numpy array to the last array added for the given
         metaName and parent gIndex.
-        """
+        '''
         raise NotImplementedError()
 
     def addArrayValues(self, name, values, g_index=-1, override: bool = False):
-        """
+        '''
         Adds an array with the given numpy array values for the given metaName and
         parent section gIndex. Override determines whether to rewrite existing values
         in the backend.
-        """
+        '''
         section, quantity_def = self.get_open_section_for_quantity(name, g_index)
         if isinstance(quantity_def.type, Reference):
             # quantity is a reference
@@ -239,17 +239,17 @@ class MetainfoBackend(LegacyParserBackend):
             'This method does not make sense in the context of the new metainfo.')
 
     def get_sections(self, meta_name: str, g_index: int = -1) -> List[int]:
-        """ Return all gIndices for existing sections of the given meta_name and parent index. """
+        ''' Return all gIndices for existing sections of the given meta_name and parent index. '''
         section_def = self.env.resolve_definition(meta_name, Section)
         return [
             section.m_parent_index for section in self.resource.all(section_def.section_cls)
             if g_index == -1 or section.m_parent.m_parent_index == g_index]
 
     def get_value(self, meta_name: str, g_index=-1) -> Any:
-        """
+        '''
         Return the value set to the given meta_name in its parent section of the given index.
         An index of -1 (default) is only allowed if there is exactly one parent section.
-        """
+        '''
         try:
             quantity = self.env.resolve_definition(meta_name, Quantity)
         except KeyError:
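
For orientation, a minimal sketch of how a legacy parser drives this backend
interface. The section and quantity names and all values are invented for
illustration and do not come from a real parser:

    import numpy as np

    def parse_example(backend):
        # open a known root section; the returned index identifies it later on
        run_index = backend.openSection('section_run')
        backend.addValue('program_name', 'EXAMPLE-CODE', run_index)

        # sub-sections use the latest opened section of their parent
        system_index = backend.openSection('section_system')
        backend.addArrayValues('atom_positions', np.zeros((2, 3)), system_index)

        backend.closeSection('section_system', system_index)
        backend.closeSection('section_run', run_index)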
diff --git a/nomad/parsing/parser.py b/nomad/parsing/parser.py
index a9c14db93f..b4204634ff 100644
--- a/nomad/parsing/parser.py
+++ b/nomad/parsing/parser.py
@@ -29,10 +29,10 @@ from nomad.parsing.backend import LocalBackend
 
 
 class Parser(metaclass=ABCMeta):
-    """
+    '''
     Instances specify a parser. It allows finding *main files* among given uploaded
     and extracted files. Further, it allows running the parser on those 'main files'.
-    """
+    '''
 
     def __init__(self):
         self.domain = 'dft'
@@ -41,7 +41,7 @@ class Parser(metaclass=ABCMeta):
     def is_mainfile(
             self, filename: str, mime: str, buffer: bytes, decoded_buffer: str,
             compression: str = None) -> bool:
-        """
+        '''
         Checks if a file is a mainfile for this parser.
 
         Arguments:
@@ -49,12 +49,12 @@ class Parser(metaclass=ABCMeta):
             mime: The mimetype of the mainfile guessed with libmagic
             buffer: The first 2k of the mainfile contents
             compression: The compression of the mainfile ``[None, 'gz', 'bz2']``
-        """
+        '''
         pass
 
     @abstractmethod
     def run(self, mainfile: str, logger=None) -> LocalBackend:
-        """
+        '''
         Runs the parser on the given mainfile. It uses :class:`LocalBackend` as
         a backend. The meta-info access is handled by the underlying NOMAD-coe parser.
 
@@ -64,14 +64,14 @@ class Parser(metaclass=ABCMeta):
 
         Returns:
             The used :class:`LocalBackend` with status information and result data.
-        """
+        '''
 
 
 class BrokenParser(Parser):
-    """
+    '''
     A parser implementation that just fails and is used to match mainfiles with known
     patterns of corruption.
-    """
+    '''
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.name = 'parser/broken'
@@ -97,7 +97,7 @@ class BrokenParser(Parser):
 
 
 class MatchingParser(Parser):
-    """
+    '''
     A parser implementation that uses regular expressions to match mainfiles.
 
     Arguments:
@@ -107,7 +107,7 @@ class MatchingParser(Parser):
         mainfile_name_re: A regexp that is used to match the paths of potential mainfiles
         domain: The domain that this parser should be used for. Default is 'dft'.
         supported_compressions: A list of [gz, bz2], if the parser supports compressed files
-    """
+    '''
     def __init__(
             self, name: str, code_name: str,
             mainfile_contents_re: str = None,
@@ -153,10 +153,10 @@ class MatchingParser(Parser):
 
 
 class MissingParser(MatchingParser):
-    """
+    '''
     A parser implementation that just fails and is used to match mainfiles with known
     patterns of corruption.
-    """
+    '''
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
@@ -165,14 +165,14 @@ class MissingParser(MatchingParser):
 
 
 class LegacyParser(MatchingParser):
-    """
+    '''
     A parser implementation for legacy NOMAD-coe parsers. It assumes that parsers
     are installed to the python environment.
 
     Arguments:
         parser_class_name: the main parser class that implements NOMAD-coe's
         backend_factory: a callable that returns a backend, takes meta_info and logger as argument
-    """
+    '''
     def __init__(self, parser_class_name: str, *args, backend_factory=None, **kwargs) -> None:
         super().__init__(*args, **kwargs)
 
@@ -212,11 +212,11 @@ class LegacyParser(MatchingParser):
 
 
 class VaspOutcarParser(LegacyParser):
-    """
+    '''
     LegacyParser that only matches mainfiles if there is no .xml file in the
     same directory, i.e. it runs the VASP OUTCAR parser only in the absence of
     .xml output files.
-    """
+    '''
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.name = 'parsers/vaspoutcar'
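
To make the matching arguments above concrete, a hypothetical parser
declaration; the code name, regular expressions, and the run body are invented:

    class ExampleParser(MatchingParser):
        ''' Hypothetical parser, only to illustrate the matching arguments. '''
        def __init__(self):
            super().__init__(
                name='parsers/example', code_name='EXAMPLE',
                mainfile_contents_re=r'EXAMPLE-CODE\s+v\d+',  # tried on the first 2k of each file
                mainfile_name_re=r'.*\.out$',                 # tried on the file path
                supported_compressions=['gz'])

        def run(self, mainfile: str, logger=None) -> LocalBackend:
            raise NotImplementedError()  # actual parsing would happen here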
diff --git a/nomad/processing/__init__.py b/nomad/processing/__init__.py
index 4454cdbc60..34cd30e2e9 100644
--- a/nomad/processing/__init__.py
+++ b/nomad/processing/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 Processing comprises everything that is necessary to take an uploaded user file,
 process it, and store all necessary data for *repository*, *archive*, and potential
 future services (e.g. *encyclopedia*).
@@ -54,7 +54,7 @@ classes do represent the processing state, as well as the respective entity.
     :members:
 .. autoclass:: nomad.processing.data.Calc
     :members:
-"""
+'''
 
 from nomad.processing.base import app, InvalidId, ProcNotRegistered, SUCCESS, FAILURE, \
     RUNNING, PENDING, PROCESS_COMPLETED, PROCESS_RUNNING, ProcessAlreadyRunning
diff --git a/nomad/processing/base.py b/nomad/processing/base.py
index 0fd5c3dc9b..3dc11be879 100644
--- a/nomad/processing/base.py
+++ b/nomad/processing/base.py
@@ -107,7 +107,7 @@ class ProcMetaclass(TopLevelDocumentMetaclass):
 
 
 class Proc(Document, metaclass=ProcMetaclass):
-    """
+    '''
     Base class for objects that are involved in processing and need persistent processing
     state.
 
@@ -133,14 +133,14 @@ class Proc(Document, metaclass=ProcMetaclass):
         complete_time: the time that processing completed (successfully or not)
         current_process: the currently or last run asynchronous process
         process_status: the status of the currently or last run asynchronous process
-    """
+    '''
 
     meta: Any = {
         'abstract': True,
     }
 
     tasks: List[str] = None
-    """ the ordered list of tasks that comprise a processing run """
+    ''' the ordered list of tasks that comprise a processing run '''
 
     current_task = StringField(default=None)
     tasks_status = StringField(default=CREATED)
@@ -158,17 +158,17 @@ class Proc(Document, metaclass=ProcMetaclass):
 
     @property
     def tasks_running(self) -> bool:
-        """ Returns True of the process has failed or succeeded. """
+        ''' Returns True if the tasks have neither failed nor succeeded yet. '''
         return self.tasks_status not in [SUCCESS, FAILURE]
 
     @property
     def process_running(self) -> bool:
-        """ Returns True of an asynchrounous process is currently running. """
+        ''' Returns True if an asynchronous process is currently running. '''
         return self.process_status is not None and self.process_status != PROCESS_COMPLETED
 
     @classmethod
     def process_running_mongoengine_query(cls):
-        """ Returns a mongoengine query dict (to be used in objects) to find running processes. """
+        ''' Returns a mongoengine query dict (to be used in objects) to find running processes. '''
         return dict(process_status__in=[PROCESS_CALLED, PROCESS_RUNNING])
 
     def get_logger(self):
@@ -179,9 +179,9 @@ class Proc(Document, metaclass=ProcMetaclass):
 
     @classmethod
     def create(cls, **kwargs):
-        """ Factory method that must be used instead of regular constructor. """
+        ''' Factory method that must be used instead of the regular constructor. '''
         assert 'tasks_status' not in kwargs, \
-            """ do not set the status manually, its managed """
+            ''' do not set the status manually, it is managed '''
 
         kwargs.setdefault('create_time', datetime.utcnow())
         self = cls(**kwargs)
@@ -194,7 +194,7 @@ class Proc(Document, metaclass=ProcMetaclass):
         return self
 
     def reset(self, worker_hostname: str = None):
-        """ Resets the task chain. Assumes there no current running process. """
+        ''' Resets the task chain. Assumes there is no currently running process. '''
         assert not self.process_running
 
         self.current_task = None
@@ -206,7 +206,7 @@ class Proc(Document, metaclass=ProcMetaclass):
 
     @classmethod
     def reset_pymongo_update(cls, worker_hostname: str = None):
-        """ Returns a pymongo update dict part to reset calculations. """
+        ''' Returns a pymongo update dict part to reset calculations. '''
         return dict(
             current_task=None, process_status=None, tasks_status=PENDING, errors=[], warnings=[],
             worker_hostname=worker_hostname)
@@ -244,7 +244,7 @@ class Proc(Document, metaclass=ProcMetaclass):
             logger.critical(msg, **kwargs)
 
     def fail(self, *errors, log_level=logging.ERROR, **kwargs):
-        """ Allows to fail the process. Takes strings or exceptions as args. """
+        ''' Allows failing the process. Takes strings or exceptions as args. '''
         assert self.process_running or self.tasks_running, 'Cannot fail a completed process.'
 
         failed_with_exception = False
@@ -274,7 +274,7 @@ class Proc(Document, metaclass=ProcMetaclass):
         self.save()
 
     def warning(self, *warnings, log_level=logging.WARNING, **kwargs):
-        """ Allows to save warnings. Takes strings or exceptions as args. """
+        ''' Allows saving warnings. Takes strings or exceptions as args. '''
         assert self.process_running or self.tasks_running
 
         logger = self.get_logger(**kwargs)
@@ -326,30 +326,30 @@ class Proc(Document, metaclass=ProcMetaclass):
             self.get_logger().info('completed process')
 
     def on_tasks_complete(self):
-        """ Callback that is called when the list of task are completed """
+        ''' Callback that is called when the list of tasks is completed. '''
         pass
 
     def on_process_complete(self, process_name):
-        """ Callback that is called when the corrent process completed """
+        ''' Callback that is called when the current process completed. '''
         pass
 
     def block_until_complete(self, interval=0.01):
-        """
+        '''
         Reloads the process constantly until it sees a completed process. Should be
         used with care as it can block indefinitely. Just intended for testing purposes.
-        """
+        '''
         while self.tasks_running or self.process_running:
             time.sleep(interval)
             self.reload()
 
     @classmethod
     def process_all(cls, func, query: Dict[str, Any], exclude: List[str] = []):
-        """
+        '''
         Allows running process functions for all objects of the given query. Calling
         process functions through the :func:`process` wrapper might be slow, because
         it causes a save on each call. This function will use a query-based update to
         do the same for all objects at once.
-        """
+        '''
 
         running_query = dict(cls.process_running_mongoengine_query())
         running_query.update(query)
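
For illustration, the per-object route that process_all avoids versus the
query-based update; Calc, re_process_calc, and reset_pymongo_update exist in
this code base, the upload id and hostname are invented:

    # slow: each call through a @process function saves the document individually
    for calc in Calc.objects(upload_id='example_upload'):
        calc.re_process_calc()

    # fast: a single query-based update touches all matching documents at once
    Calc._get_collection().update_many(
        {'upload_id': 'example_upload'},
        {'$set': Calc.reset_pymongo_update(worker_hostname='worker-1')})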
@@ -388,14 +388,14 @@ class Proc(Document, metaclass=ProcMetaclass):
 
 
 def task(func):
-    """
+    '''
     The decorator for tasks that will be wrapped in exception handling that will fail the process.
     The task methods of a :class:`Proc` class/document comprise a sequence
     (order of methods in class namespace) of tasks. Tasks must be executed in that order.
     Completion of the last task will put the :class:`Proc` instance into the
     SUCCESS state. Calling the first task will put it into RUNNING state. Tasks will
     only be executed if the process has not yet reached FAILURE state.
-    """
+    '''
     @functools.wraps(func)
     def wrapper(self, *args, **kwargs):
         try:
@@ -425,20 +425,20 @@ def task(func):
 
 
 def all_subclasses(cls):
-    """ Helper method to calculate set of all subclasses of a given class. """
+    ''' Helper method to calculate set of all subclasses of a given class. '''
     return set(cls.__subclasses__()).union(
         [s for c in cls.__subclasses__() for s in all_subclasses(c)])
 
 
 all_proc_cls = {cls.__name__: cls for cls in all_subclasses(Proc)}
-""" Name dictionary for all Proc classes. """
+''' Name dictionary for all Proc classes. '''
 
 
 class NomadCeleryRequest(Request):
-    """
+    '''
     A custom celery request class that allows catching errors in the worker main
     thread, which cannot be caught on the worker threads themselves.
-    """
+    '''
 
     def _fail(self, event, **kwargs):
         args = self._payload[0]
@@ -480,9 +480,9 @@ class NomadCeleryTask(Task):
 
 
 def unwarp_task(task, cls_name, self_id, *args, **kwargs):
-    """
+    '''
     Retrieves the proc object that the given task is executed on from the database.
-    """
+    '''
     logger = utils.get_logger(__name__, cls=cls_name, id=self_id)
 
     # get the process class
@@ -521,13 +521,13 @@ def unwarp_task(task, cls_name, self_id, *args, **kwargs):
     acks_late=config.celery.acks_late, soft_time_limit=config.celery.timeout,
     time_limit=config.celery.timeout * 2)
 def proc_task(task, cls_name, self_id, func_attr):
-    """
+    '''
     The celery task that is used to execute async process functions.
     It ignores results, since all results are handled via the self document.
     It retries 3 times with a countdown of 3 on missing 'selfs', since this
     might happen in sharded, distributed mongo setups where the object might not
     have been propagated yet and therefore appears missing.
-    """
+    '''
     self = unwarp_task(task, cls_name, self_id)
 
     logger = self.get_logger()
@@ -576,14 +576,14 @@ def proc_task(task, cls_name, self_id, func_attr):
 
 
 def process(func):
-    """
+    '''
     The decorator for process functions that will be called async via celery.
     All calls to the decorated method will result in celery task requests.
     To transfer state, the instance will be saved to the database and loaded on
     the celery task worker. Process methods can call other (process) functions/methods
     on other :class:`Proc` instances. Each :class:`Proc` instance can only run one
     process at a time.
-    """
+    '''
     @functools.wraps(func)
     def wrapper(self, *args, **kwargs):
         assert len(args) == 0 and len(kwargs) == 0, 'process functions must not have arguments'
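
A minimal sketch of how the task and process decorators combine on a Proc
subclass; the class and method names are hypothetical:

    class ExampleProc(Proc):
        ''' Hypothetical Proc with a two-task chain started by a process function. '''

        @process
        def do_work(self):   # called async via a celery task request
            self.first()     # kicks off the task chain

        @task
        def first(self):     # tasks must complete in their declaration order
            self.second()

        @task
        def second(self):    # completing the last task puts the Proc into SUCCESS
            pass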
diff --git a/nomad/processing/data.py b/nomad/processing/data.py
index 465c684af7..7a58323da1 100644
--- a/nomad/processing/data.py
+++ b/nomad/processing/data.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 This module comprises a set of persistent document classes that hold all user related
 data. This is information about users, their uploads and datasets, the associated
 calculations, and files.
@@ -22,9 +22,9 @@ calculations, and files
 
 .. autoclass:: Upload
 
-"""
+'''
 
-from typing import cast, List, Any, ContextManager, Tuple, Generator, Dict, cast
+from typing import cast, List, Any, ContextManager, Tuple, Generator, Dict, cast, Iterable
 from mongoengine import StringField, DateTimeField, DictField, BooleanField, IntField
 import logging
 from structlog import wrap_logger
@@ -41,7 +41,6 @@ from nomad.files import PathObject, UploadFiles, ExtractError, ArchiveBasedStagi
 from nomad.processing.base import Proc, process, task, PENDING, SUCCESS, FAILURE
 from nomad.parsing import parser_dict, match_parser, LocalBackend
 from nomad.normalizing import normalizers
-from nomad.datamodel import UploadWithMetadata
 
 
 def _pack_log_event(logger, method_name, event_dict):
@@ -66,14 +65,8 @@ _log_processors = [
     TimeStamper(fmt="%Y-%m-%d %H:%M.%S", utc=False)]
 
 
-_all_root_sections = []
-for domain in datamodel.Domain.instances.values():
-    for root_section in domain.root_sections:
-        _all_root_sections.append(root_section)
-
-
 class Calc(Proc):
-    """
+    '''
     Instances of this class represent calculations. This class manages the elastic
     search index entry, files, and archive for the respective calculation.
 
@@ -88,8 +81,8 @@ class Calc(Proc):
         upload_id: the id of the upload used to create this calculation
         mainfile: the mainfile (including path in upload) that was used to create this calc
 
-        metadata: the metadata record wit calc and user metadata, see :class:`datamodel.CalcWithMetadata`
-    """
+        metadata: the metadata record with calc and user metadata, see :class:`datamodel.EntryMetadata`
+    '''
     calc_id = StringField(primary_key=True)
     upload_id = StringField()
     mainfile = StringField()
@@ -120,12 +113,12 @@ class Calc(Proc):
         self._calc_proc_logwriter_ctx: ContextManager = None
 
     @classmethod
-    def from_calc_with_metadata(cls, calc_with_metadata):
+    def from_entry_metadata(cls, entry_metadata):
         calc = Calc.create(
-            calc_id=calc_with_metadata.calc_id,
-            upload_id=calc_with_metadata.upload_id,
-            mainfile=calc_with_metadata.mainfile,
-            metadata=calc_with_metadata.to_dict())
+            calc_id=entry_metadata.calc_id,
+            upload_id=entry_metadata.upload_id,
+            mainfile=entry_metadata.mainfile,
+            metadata=entry_metadata.m_to_dict(include_defaults=True))
 
         return calc
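
The mongo metadata field now stores a metainfo dict instead of a
CalcWithMetadata dict. A round trip, assuming EntryMetadata accepts quantity
values as keyword arguments like other metainfo sections:

    entry_metadata = datamodel.EntryMetadata(
        calc_id='example_calc', upload_id='example_upload')
    as_dict = entry_metadata.m_to_dict(include_defaults=True)  # what gets stored
    restored = datamodel.EntryMetadata.m_from_dict(as_dict)    # back to a section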
 
@@ -152,10 +145,10 @@ class Calc(Proc):
         return self._upload_files
 
     def get_logger(self, **kwargs):
-        """
+        '''
         Returns a wrapped logger that additionally saves all entries to the calculation
         processing log in the archive.
-        """
+        '''
         logger = super().get_logger()
         logger = logger.bind(
             upload_id=self.upload_id, mainfile=self.mainfile, calc_id=self.calc_id, **kwargs)
@@ -189,11 +182,11 @@ class Calc(Proc):
 
     @process
     def re_process_calc(self):
-        """
+        '''
         Processes a calculation again. This means there is already metadata and
         instead of creating it initially, we are just updating the existing
         records.
-        """
+        '''
         parser = match_parser(self.mainfile, self.upload_files, strict=False)
 
         if parser is None and not config.reprocess_unmatched:
@@ -228,16 +221,16 @@ class Calc(Proc):
                 parser=parser.name)
 
         try:
-            calc_with_metadata = datamodel.CalcWithMetadata(**self.metadata)
-            calc_with_metadata.upload_id = self.upload_id
-            calc_with_metadata.calc_id = self.calc_id
-            calc_with_metadata.calc_hash = self.upload_files.calc_hash(self.mainfile)
-            calc_with_metadata.mainfile = self.mainfile
-            calc_with_metadata.nomad_version = config.version
-            calc_with_metadata.nomad_commit = config.commit
-            calc_with_metadata.last_processing = datetime.utcnow()
-            calc_with_metadata.files = self.upload_files.calc_files(self.mainfile)
-            self.metadata = calc_with_metadata.to_dict()
+            entry_metadata = datamodel.EntryMetadata.m_from_dict(self.metadata)
+            entry_metadata.upload_id = self.upload_id
+            entry_metadata.calc_id = self.calc_id
+            entry_metadata.calc_hash = self.upload_files.calc_hash(self.mainfile)
+            entry_metadata.mainfile = self.mainfile
+            entry_metadata.nomad_version = config.version
+            entry_metadata.nomad_commit = config.commit
+            entry_metadata.last_processing = datetime.utcnow()
+            entry_metadata.files = self.upload_files.calc_files(self.mainfile)
+            self.metadata = entry_metadata.m_to_dict(include_defaults=True)
 
             self.parsing()
             self.normalizing()
@@ -253,10 +246,10 @@ class Calc(Proc):
 
     @process
     def process_calc(self):
-        """
+        '''
         Processes a new calculation that has no prior records in the mongo, elastic,
         or filesystem storage. It will create an initial set of (user) metadata.
-        """
+        '''
         logger = self.get_logger()
         if self.upload is None:
             logger.error('calculation upload does not exist')
@@ -264,23 +257,23 @@ class Calc(Proc):
         try:
             # save preliminary minimum calc metadata in case processing fails
             # successful processing will replace it with the actual metadata
-            calc_with_metadata = datamodel.CalcWithMetadata(
-                domain=parser_dict[self.parser].domain,
-                upload_id=self.upload_id,
-                calc_id=self.calc_id,
-                calc_hash=self.upload_files.calc_hash(self.mainfile),
-                mainfile=self.mainfile)
-            calc_with_metadata.published = False
-            calc_with_metadata.uploader = self.upload.user_id
-            calc_with_metadata.upload_time = self.upload.upload_time
-            calc_with_metadata.upload_name = self.upload.name
-            calc_with_metadata.nomad_version = config.version
-            calc_with_metadata.nomad_commit = config.commit
-            calc_with_metadata.last_processing = datetime.utcnow()
-            calc_with_metadata.files = self.upload_files.calc_files(self.mainfile)
-            self.metadata = calc_with_metadata.to_dict()
-
-            if len(calc_with_metadata.files) >= config.auxfile_cutoff:
+            calc_metadata = datamodel.EntryMetadata()
+            calc_metadata.domain = parser_dict[self.parser].domain
+            calc_metadata.upload_id = self.upload_id
+            calc_metadata.calc_id = self.calc_id
+            calc_metadata.calc_hash = self.upload_files.calc_hash(self.mainfile)
+            calc_metadata.mainfile = self.mainfile
+            calc_metadata.nomad_version = config.version
+            calc_metadata.nomad_commit = config.commit
+            calc_metadata.last_processing = datetime.utcnow()
+            calc_metadata.files = self.upload_files.calc_files(self.mainfile)
+            calc_metadata.uploader = self.upload.user_id
+            calc_metadata.upload_time = self.upload.upload_time
+            calc_metadata.upload_name = self.upload.name
+            self.metadata = calc_metadata.m_to_dict(include_defaults=True)  # TODO use embedded doc?
+
+            if len(calc_metadata.files) >= config.auxfile_cutoff:
                 self.warning(
                     'This calc has many aux files in its directory. '
                     'Have you placed many calculations in the same directory?')
@@ -301,25 +294,16 @@ class Calc(Proc):
         # in case of failure, index a minimum set of metadata and mark
         # processing failure
         try:
-            calc_with_metadata = datamodel.CalcWithMetadata(**self.metadata)
-            calc_with_metadata.formula = config.services.not_processed_value
-            calc_with_metadata.basis_set = config.services.not_processed_value
-            calc_with_metadata.xc_functional = config.services.not_processed_value
-            calc_with_metadata.system = config.services.not_processed_value
-            calc_with_metadata.crystal_system = config.services.not_processed_value
-            calc_with_metadata.spacegroup = config.services.not_processed_value
-            calc_with_metadata.spacegroup_symbol = config.services.not_processed_value
-            calc_with_metadata.code_version = config.services.not_processed_value
-
-            calc_with_metadata.code_name = config.services.not_processed_value
+            entry_metadata = datamodel.EntryMetadata.m_from_dict(self.metadata)
             if self.parser is not None:
                 parser = parser_dict[self.parser]
                 if hasattr(parser, 'code_name'):
-                    calc_with_metadata.code_name = parser.code_name
+                    entry_metadata.code_name = parser.code_name
 
-            calc_with_metadata.processed = False
-            self.metadata = calc_with_metadata.to_dict()
-            search.Entry.from_calc_with_metadata(calc_with_metadata).save()
+            entry_metadata.processed = False
+            self.metadata = entry_metadata.m_to_dict(include_defaults=True)
+
+            search.create_entry(entry_metadata).save()
         except Exception as e:
             self.get_logger().error('could not index after processing failure', exc_info=e)
 
@@ -335,7 +319,7 @@ class Calc(Proc):
 
     @task
     def parsing(self):
-        """ The *task* that encapsulates all parsing related actions. """
+        ''' The *task* that encapsulates all parsing related actions. '''
         context = dict(parser=self.parser, step=self.parser)
         logger = self.get_logger(**context)
         parser = parser_dict[self.parser]
@@ -405,7 +389,7 @@ class Calc(Proc):
 
     @task
     def normalizing(self):
-        """ The *task* that encapsulates all normalizing related actions. """
+        ''' The *task* that encapsulates all normalizing related actions. '''
         for normalizer in normalizers:
             if normalizer.domain != parser_dict[self.parser].domain:
                 continue
@@ -435,27 +419,27 @@ class Calc(Proc):
 
     @task
     def archiving(self):
-        """ The *task* that encapsulates all archival related actions. """
+        ''' The *task* that encapsulates all archival related actions. '''
         logger = self.get_logger()
 
-        calc_with_metadata = datamodel.CalcWithMetadata(**self.metadata)
-        calc_with_metadata.apply_domain_metadata(self._parser_backend)
-        calc_with_metadata.processed = True
+        entry_metadata = datamodel.EntryMetadata.m_from_dict(self.metadata)
+        entry_metadata.apply_domain_metadata(self._parser_backend)
+        entry_metadata.processed = True
 
         # persist the calc metadata
         with utils.timer(logger, 'saved calc metadata', step='metadata'):
-            self.metadata = calc_with_metadata.to_dict()
+            self.metadata = entry_metadata.m_to_dict(include_defaults=True)
 
         # index in search
         with utils.timer(logger, 'indexed', step='index'):
-            search.Entry.from_calc_with_metadata(calc_with_metadata).save()
+            search.create_entry(entry_metadata).save()
 
         # persist the archive
         with utils.timer(
                 logger, 'archived', step='archive',
                 input_size=self.mainfile_file.size) as log_data:
             with self.upload_files.archive_file(self.calc_id, 'wt') as out:
-                self._parser_backend.write_json(out, pretty=True, root_sections=_all_root_sections)
+                self._parser_backend.write_json(out, pretty=True, root_sections=datamodel.root_sections)
 
             log_data.update(archive_size=self.upload_files.archive_file_object(self.calc_id).size)
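
search.create_entry replaces search.Entry.from_calc_with_metadata and is defined
in nomad/search.py outside this excerpt; purely for orientation, a rough sketch
of a plausible shape (assuming the metainfo accessors m_is_set and m_get), not
the actual implementation:

    def create_entry(entry_metadata):
        # fill the generated elasticsearch document from the metainfo section;
        # the real code also handles search annotations and nested objects
        entry = Entry(meta=dict(id=entry_metadata.calc_id))
        for quantity in entry_metadata.m_def.all_quantities.values():
            if entry_metadata.m_is_set(quantity):
                setattr(entry, quantity.name, entry_metadata.m_get(quantity))
        return entry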
 
@@ -474,7 +458,7 @@ class Calc(Proc):
 
 
 class Upload(Proc):
-    """
+    '''
     Represents uploads in the databases. Provides persistence access to the files storage,
     and processing state.
 
@@ -489,7 +473,7 @@ class Upload(Proc):
         publish_time: Date when the upload was initially published
         last_update: Date of the last publishing/re-processing
         joined: Boolean indicates if the running processing has joined (:func:`check_join`)
-    """
+    '''
     id_field = 'upload_id'
 
     upload_id = StringField(primary_key=True)
@@ -518,13 +502,13 @@ class Upload(Proc):
 
     @property
     def metadata(self) -> dict:
-        """
+        '''
         Getter, setter for user metadata. Metadata is pickled to and from the public
         bucket to allow sharing among all processes. Usually uploads do not have (much)
         user defined metadata, but users provide all metadata per upload as part of
         the publish process. This will change, when we introduce editing functionality
         and metadata will be provided through different means.
-        """
+        '''
         try:
             upload_files = PublicUploadFiles(self.upload_id, is_authorized=lambda: True)
         except KeyError:
@@ -542,7 +526,7 @@ class Upload(Proc):
 
     @classmethod
     def user_uploads(cls, user: datamodel.User, **kwargs) -> List['Upload']:
-        """ Returns all uploads for the given user. Kwargs are passed to mongo query. """
+        ''' Returns all uploads for the given user. Kwargs are passed to mongo query. '''
         return cls.objects(user_id=str(user.user_id), **kwargs)
 
     @property
@@ -561,14 +545,14 @@ class Upload(Proc):
 
     @classmethod
     def create(cls, **kwargs) -> 'Upload':
-        """
+        '''
         Creates a new upload for the given user; a user-given name is optional.
         It will populate the record with a signed url and pending :class:`UploadProc`.
         The upload will be already saved to the database.
 
         Arguments:
             user: The user that created the upload.
-        """
+        '''
         # use kwargs to keep compatibility with super method
         user: datamodel.User = kwargs['user']
         del(kwargs['user'])
@@ -583,15 +567,15 @@ class Upload(Proc):
         return self
 
     def delete(self):
-        """ Deletes this upload process state entry and its calcs. """
+        ''' Deletes this upload process state entry and its calcs. '''
         Calc.objects(upload_id=self.upload_id).delete()
         super().delete()
 
     def delete_upload_local(self):
-        """
+        '''
         Deletes the upload, including its processing state and
         staging files. Local version without celery processing.
-        """
+        '''
         logger = self.get_logger()
 
         with utils.lnr(logger, 'staged upload delete failed'):
@@ -609,28 +593,27 @@ class Upload(Proc):
 
     @process
     def delete_upload(self):
-        """
+        '''
         Deletes the upload, including its processing state and
         staging files. This starts the celery process of deleting the upload.
-        """
+        '''
         self.delete_upload_local()
 
         return True  # do not save the process status on the delete upload
 
     @process
     def publish_upload(self):
-        """
+        '''
         Moves the upload out of staging to the public area. It will
         pack the staging upload files into public upload files.
-        """
+        '''
         assert self.processed_calcs > 0
 
         logger = self.get_logger()
         logger.info('started to publish')
 
         with utils.lnr(logger, 'publish failed'):
-            upload_with_metadata = self.to_upload_with_metadata(self.metadata)
-            calcs = upload_with_metadata.calcs
+            calcs = self.entries_metadata(self.metadata)
 
             with utils.timer(
                     logger, 'upload metadata updated', step='metadata',
@@ -641,7 +624,7 @@ class Upload(Proc):
                     calc.with_embargo = calc.with_embargo if calc.with_embargo is not None else False
                     return UpdateOne(
                         {'_id': calc.calc_id},
-                        {'$set': {'metadata': calc.to_dict()}})
+                        {'$set': {'metadata': calc.m_to_dict(include_defaults=True)}})
 
                 Calc._get_collection().bulk_write([create_update(calc) for calc in calcs])
 
@@ -649,7 +632,7 @@ class Upload(Proc):
                 with utils.timer(
                         logger, 'staged upload files packed', step='pack',
                         upload_size=self.upload_files.size):
-                    self.upload_files.pack(upload_with_metadata)
+                    self.upload_files.pack(calcs)
 
             with utils.timer(
                     logger, 'index updated', step='index',
@@ -671,7 +654,7 @@ class Upload(Proc):
 
     @process
     def re_process_upload(self):
-        """
+        '''
         A *process* that performs the re-processing of an earlier processed
         upload.
 
@@ -681,7 +664,7 @@ class Upload(Proc):
 
         TODO this implementation does not do any re-matching. This will be more complex
         due to handling of new or missing matches.
-        """
+        '''
         assert self.published
 
         logger = self.get_logger()
@@ -730,7 +713,7 @@ class Upload(Proc):
 
     @process
     def re_pack(self):
-        """ A *process* that repacks the raw and archive data based on the current embargo data. """
+        ''' A *process* that repacks the raw and archive data based on the current embargo data. '''
         assert self.published
 
         # mock the steps of actual processing
@@ -739,19 +722,19 @@ class Upload(Proc):
         self._continue_with('parse_all')
         self._continue_with('cleanup')
 
-        self.upload_files.re_pack(self.to_upload_with_metadata())
+        self.upload_files.re_pack(self.entries_metadata())
         self.joined = True
         self._complete()
 
     @process
     def process_upload(self):
-        """ A *process* that performs the initial upload processing. """
+        ''' A *process* that performs the initial upload processing. '''
         self.extracting()
         self.parse_all()
 
     @task
     def uploading(self):
-        """ A no-op *task* as a stand-in for receiving upload data. """
+        ''' A no-op *task* as a stand-in for receiving upload data. '''
         pass
 
     @property
@@ -772,10 +755,10 @@ class Upload(Proc):
 
     @task
     def extracting(self):
-        """
+        '''
         The *task* performed before the actual parsing/normalizing: extracting
         the uploaded files.
-        """
+        '''
         # extract the uploaded file
         self._upload_files = ArchiveBasedStagingUploadFiles(
             upload_id=self.upload_id, is_authorized=lambda: True, create=True,
@@ -800,10 +783,10 @@ class Upload(Proc):
             return
 
     def _preprocess_files(self, path):
-        """
+        '''
         Some files need preprocessing. Currently we need to add a stripped POTCAR version
         and always restrict/embargo the original.
-        """
+        '''
         if os.path.basename(path).startswith('POTCAR'):
             # create checksum
             hash = hashlib.sha224()
@@ -829,13 +812,13 @@ class Upload(Proc):
                     self.staging_upload_files.raw_file_object(stripped_path).os_path))
 
     def match_mainfiles(self) -> Generator[Tuple[str, object], None, None]:
-        """
+        '''
         Generator function that matches all files in the upload to all parsers to
         determine the upload's mainfiles.
 
         Returns:
             Tuples of mainfile, filename, and parsers
-        """
+        '''
         directories_with_match: Dict[str, str] = dict()
         upload_files = self.staging_upload_files
         for filename in upload_files.raw_file_manifest():
@@ -859,10 +842,10 @@ class Upload(Proc):
 
     @task
     def parse_all(self):
-        """
+        '''
         The *task* that identifies mainfile/parser combinations among the upload's
         files, creates respective :class:`Calc` instances, and triggers their processing.
-        """
+        '''
         logger = self.get_logger()
 
         with utils.timer(
@@ -882,14 +865,14 @@ class Upload(Proc):
             self.check_join()
 
     def check_join(self):
-        """
+        '''
         Performs an evaluation of the join condition and triggers the :func:`cleanup`
         task if necessary. The join condition allows running the ``cleanup`` after
         all calculations have been processed. The upload processing stops after all
         calculation processings have been triggered (:func:`parse_all` or
         :func:`re_process_upload`). The cleanup task is then run within the last
         calculation process (the one that triggered the join by calling this method).
-        """
+        '''
         total_calcs = self.total_calcs
         processed_calcs = self.processed_calcs
 
@@ -951,7 +934,7 @@ class Upload(Proc):
                 logger, 'reprocessed staged upload packed', step='delete staged',
                 upload_size=self.upload_files.size):
 
-            staging_upload_files.pack(self.to_upload_with_metadata(), skip_raw=True)
+            staging_upload_files.pack(self.entries_metadata(), skip_raw=True)
 
         with utils.timer(
                 logger, 'reprocessed staged upload deleted', step='delete staged',
@@ -963,10 +946,10 @@ class Upload(Proc):
 
     @task
     def cleanup(self):
-        """
+        '''
         The *task* that "cleans" the processing, i.e. removes obsolete files and performs
         pending archival operations. Depends on the type of processing.
-        """
+        '''
         search.refresh()
 
         if self.current_process == 're_process_upload':
@@ -975,58 +958,58 @@ class Upload(Proc):
             self._cleanup_after_processing()
 
     def get_calc(self, calc_id) -> Calc:
-        """ Returns the upload calc with the given id or ``None``. """
+        ''' Returns the upload calc with the given id or ``None``. '''
         return Calc.objects(upload_id=self.upload_id, calc_id=calc_id).first()
 
     @property
     def processed_calcs(self):
-        """
+        '''
         The number of successfully or unsuccessfully processed calculations, i.e.
         calculations that have finished processing.
-        """
+        '''
         return Calc.objects(upload_id=self.upload_id, tasks_status__in=[SUCCESS, FAILURE]).count()
 
     @property
     def total_calcs(self):
-        """ The number of all calculations. """
+        ''' The number of all calculations. '''
         return Calc.objects(upload_id=self.upload_id).count()
 
     @property
     def failed_calcs(self):
-        """ The number of calculations with failed processing. """
+        ''' The number of calculations with failed processing. '''
         return Calc.objects(upload_id=self.upload_id, tasks_status=FAILURE).count()
 
     @property
     def pending_calcs(self) -> int:
-        """ The number of calculations with pending processing. """
+        ''' The number of calculations with pending processing. '''
         return Calc.objects(upload_id=self.upload_id, tasks_status=PENDING).count()
 
     def all_calcs(self, start, end, order_by=None):
-        """
+        '''
         Returns all calculations, paginated and ordered.
 
         Arguments:
             start: the start index of the requested page
             end: the end index of the requested page
             order_by: the property to order by
-        """
+        '''
         query = Calc.objects(upload_id=self.upload_id)[start:end]
         return query.order_by(order_by) if order_by is not None else query
 
     @property
     def outdated_calcs(self):
-        """ All successfully processed and outdated calculations. """
+        ''' All successfully processed and outdated calculations. '''
         return Calc.objects(
             upload_id=self.upload_id, tasks_status=SUCCESS,
             metadata__nomad_version__ne=config.version)
 
     @property
     def calcs(self):
-        """ All successfully processed calculations. """
+        ''' All successfully processed calculations. '''
         return Calc.objects(upload_id=self.upload_id, tasks_status=SUCCESS)
 
-    def to_upload_with_metadata(self, user_metadata: dict = None) -> UploadWithMetadata:
-        """
+    def entries_metadata(self, user_metadata: dict = None) -> Iterable[datamodel.EntryMetadata]:
+        '''
         This is the :py:mod:`nomad.datamodel` transformation method that transforms
         the calculations of this processing upload into their datamodel form, i.e.
         :class:`datamodel.EntryMetadata` instances.
@@ -1034,10 +1017,10 @@ class Upload(Proc):
         Arguments:
             user_metadata: A dict of user metadata that is applied to the resulting
                 datamodel data and the respective calculations.
-        """
+        '''
         # prepare user metadata per upload and per calc
         if user_metadata is not None:
-            calc_metadatas: Dict[str, Any] = dict()
+            entries_metadata_dict: Dict[str, Any] = dict()
             upload_metadata: Dict[str, Any] = dict()
 
             upload_metadata.update(user_metadata)
@@ -1045,53 +1028,42 @@ class Upload(Proc):
                 del(upload_metadata['calculations'])
 
             for calc in user_metadata.get('calculations', []):  # pylint: disable=no-member
-                calc_metadatas[calc['mainfile']] = calc
+                entries_metadata_dict[calc['mainfile']] = calc
 
-            user_upload_time = upload_metadata.get('_upload_time', None)
-            user_upload_name = upload_metadata.get('_upload_name', None)
+            user_upload_time = upload_metadata.get('upload_time', None)
+            user_upload_name = upload_metadata.get('upload_name', None)
 
             def get_metadata(calc: Calc):
-                """
-                Assemble metadata from calc's processed calc metadata and the uploads
-                user metadata.
-                """
-                calc_data = calc.metadata
-                calc_with_metadata = datamodel.CalcWithMetadata(**calc_data)
-                calc_metadata = dict(upload_metadata)
-                calc_metadata.update(calc_metadatas.get(calc.mainfile, {}))
-                calc_with_metadata.apply_user_metadata(calc_metadata)
-                if calc_with_metadata.upload_time is None:
-                    calc_with_metadata.upload_time = self.upload_time if user_upload_time is None else user_upload_time
-                if calc_with_metadata.upload_name is None:
-                    calc_with_metadata.upload_name = self.name if user_upload_name is None else user_upload_name
-
-                return calc_with_metadata
+                entry_metadata = datamodel.EntryMetadata.m_from_dict(calc.metadata)
+                entry_user_metadata = dict(upload_metadata)
+                entry_user_metadata.pop('embargo_length', None)  # this is for uploads only
+                entry_user_metadata.update(entries_metadata_dict.get(calc.mainfile, {}))
+                entry_metadata.apply_user_metadata(entry_user_metadata)
+                if entry_metadata.upload_time is None:
+                    entry_metadata.upload_time = self.upload_time if user_upload_time is None else user_upload_time
+                if entry_metadata.upload_name is None:
+                    entry_metadata.upload_name = self.name if user_upload_name is None else user_upload_name
+
+                return entry_metadata
         else:
             user_upload_time = None
 
             def get_metadata(calc: Calc):
-                calc_with_metadata = datamodel.CalcWithMetadata(**calc.metadata)
-                calc_with_metadata.upload_time = self.upload_time
-                calc_with_metadata.upload_name = self.name
-
-                return calc_with_metadata
-
-        result = UploadWithMetadata(
-            upload_id=self.upload_id,
-            uploader=self.user_id,
-            upload_time=self.upload_time if user_upload_time is None else user_upload_time)
+                entry_metadata = datamodel.EntryMetadata.m_from_dict(calc.metadata)
+                entry_metadata.upload_time = self.upload_time
+                entry_metadata.upload_name = self.name
 
-        result.calcs = [get_metadata(calc) for calc in Calc.objects(upload_id=self.upload_id)]
+                return entry_metadata
 
-        return result
+        return [get_metadata(calc) for calc in Calc.objects(upload_id=self.upload_id)]
 
     def compress_and_set_metadata(self, metadata: Dict[str, Any]) -> None:
-        """
+        '''
         Stores the given user metadata in the upload document. This is the metadata
         adhering to the API model (``UploadMetaData``). Most quantities can be stored
         for the upload and for each calculation. This method will try to move values
         shared by all calculations to the upload to "compress" the data.
-        """
+        '''
         self.embargo_length = min(metadata.get('embargo_length', 36), 36)
 
         compressed = {
@@ -1103,7 +1075,7 @@ class Upload(Proc):
             compressed_calc: Dict[str, Any] = {}
             calculations.append(compressed_calc)
             for key, value in calc.items():
-                if key in ['_pid', 'mainfile', 'external_id']:
+                if key in ['pid', 'mainfile', 'external_id']:
                     # these quantities are explicitly calc specific and have to stay with
                     # the calc
                     compressed_calc[key] = value
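
An invented before/after pair to illustrate the "compression": values shared by
all calculations move up to the upload, while explicitly calc-specific keys stay
with each calculation:

    metadata = {
        'embargo_length': 12,
        'calculations': [
            {'mainfile': 'a/OUTCAR', 'comment': 'shared', 'pid': 1},
            {'mainfile': 'b/OUTCAR', 'comment': 'shared', 'pid': 2}]}

    compressed = {
        'embargo_length': 12,
        'comment': 'shared',  # moved up, it is the same for all calcs
        'calculations': [
            {'mainfile': 'a/OUTCAR', 'pid': 1},  # pid and mainfile stay per calc
            {'mainfile': 'b/OUTCAR', 'pid': 2}]}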
diff --git a/nomad/search.py b/nomad/search.py
index 0644b12f67..d30268fe1e 100644
--- a/nomad/search.py
+++ b/nomad/search.py
@@ -12,22 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 This module represents calculations in elastic search.
-"""
+'''
 
-from typing import Iterable, Dict, List, Any
-from elasticsearch_dsl import Document, InnerDoc, Keyword, Text, Date, \
-    Object, Boolean, Search, Q, A, analyzer, tokenizer
-from elasticsearch_dsl.document import IndexMeta
+from typing import Iterable, Dict, List, Any, Union, cast
+from elasticsearch_dsl import Document, InnerDoc, Keyword, Date, \
+    Object, Boolean, Integer, Search, Q, A, analyzer, tokenizer
 import elasticsearch.helpers
 from elasticsearch.exceptions import NotFoundError
 from datetime import datetime
 import json
 
-from nomad import config, datamodel, infrastructure, datamodel, utils, processing as proc
-from nomad.datamodel import Domain
-import nomad.datamodel.base
+from nomad import config, datamodel, infrastructure, datamodel, utils, metainfo, processing as proc
+from nomad.metainfo.search import SearchQuantity
 
 
 path_analyzer = analyzer(
@@ -44,173 +42,374 @@ class ElasticSearchError(Exception): pass
 class ScrollIdNotFound(Exception): pass
 
 
-class User(InnerDoc):
+_elastic_documents: Dict[str, Union[Document, InnerDoc]] = {}
 
-    @classmethod
-    def from_user(cls, user):
-        self = cls(user_id=user.user_id)
-        self.name = user.name
-        self.email = user.email
+search_quantities: Dict[str, SearchQuantity] = {}
+''' All available search quantities by their fully qualified name. '''
 
-        return self
+metrics: Dict[str, SearchQuantity] = {}
+'''
+The available search metrics. Metrics are integer values given for each entry that can
+be used in statistics (aggregations), e.g. the sum of all total energy calculations or cardinality of
+all unique geometries.
+'''
+
+groups: Dict[str, SearchQuantity] = {}
+''' The available groupable quantities '''
+
+order_default_quantities: Dict[str, SearchQuantity] = {}
+
+default_statistics: Dict[str, List[SearchQuantity]] = {}
+
+
+# TODO make sure the search quantities are initialized even without/before creating an elastic document
+# otherwise a dependency on import order is created
+def create_elastic_document(
+        section: metainfo.Section, document_name: str = None, super_cls=Document,
+        prefix: str = None, domain: str = None,
+        attrs: Dict[str, Any] = None) -> Union[Document, InnerDoc]:
+    '''
+    Create all elasticsearch_dsl mapping classes for the section and its sub sections.
+    '''
+    domain = section.m_x('domain', domain)
+    domain_or_all = domain if domain is not None else '__all__'
+
+    if document_name is None:
+        document_name = section.name
+
+    if attrs is None:
+        attrs = {}
+
+    def get_inner_document(section: metainfo.Section, **kwargs) -> type:
+        inner_document = _elastic_documents.get(section.qualified_name())
+        if inner_document is None:
+            inner_document = create_elastic_document(
+                section, super_cls=InnerDoc, **kwargs)
+
+        return inner_document
+
+    # create an attribute for each sub section
+    for sub_section in section.all_sub_sections.values():
+        sub_section_prefix = sub_section.m_x('search')
+        if sub_section_prefix is None:
+            continue
+
+        if prefix is not None:
+            sub_section_prefix = '%s.%s' % (prefix, sub_section_prefix)
+
+        inner_document = get_inner_document(
+            sub_section.sub_section, domain=domain, prefix=sub_section_prefix)
+        attrs[sub_section.name] = Object(inner_document)
+
+    # create an attribute for each quantity
+    for quantity in section.all_quantities.values():
+        local_search_quantities = quantity.m_x('search')
+
+        if local_search_quantities is None:
+            continue
+
+        if not isinstance(local_search_quantities, List):
+            local_search_quantities = [local_search_quantities]
+
+        for i, search_quantity in enumerate(local_search_quantities):
+            search_quantity.configure(quantity=quantity, prefix=prefix)
+
+            # only prefixed or top-level quantities are considered for being
+            # searched directly. Other nested quantities can only be used via
+            # another search quantity's es_quantity.
+            if prefix is not None or super_cls == Document:
+                qualified_name = search_quantity.qualified_name
+                assert qualified_name not in search_quantities, 'Search quantities must have a unique name: %s' % qualified_name
+                search_quantities[qualified_name] = search_quantity
+
+                if search_quantity.metric is not None:
+                    qualified_metric_name = search_quantity.metric_name
+                    assert qualified_metric_name not in metrics, 'Metric names must be unique: %s' % qualified_metric_name
+                    metrics[qualified_metric_name] = search_quantity
+
+                if search_quantity.group is not None:
+                    qualified_group = search_quantity.group
+                    assert qualified_group not in groups, 'Groups must be unique'
+                    groups[qualified_group] = search_quantity
+
+                if search_quantity.default_statistic:
+                    default_statistics.setdefault(domain_or_all, []).append(search_quantity)
+
+                if search_quantity.order_default:
+                    assert order_default_quantities.get(domain_or_all) is None, 'Only one quantity can be the order default'
+                    order_default_quantities[domain_or_all] = search_quantity
+
+            if i != 0:
+                # only the first search quantity gets mapped, unless the others have
+                # an explicit mapping
+                assert search_quantity.es_mapping is None, 'only the first search quantity gets mapped'
+                continue
+
+            if search_quantity.es_mapping is None:
+                # find a mapping based on quantity type
+                if quantity.type == str:
+                    search_quantity.es_mapping = Keyword()
+                elif quantity.type == int:
+                    search_quantity.es_mapping = Integer()
+                elif quantity.type == bool:
+                    search_quantity.es_mapping = Boolean()
+                elif quantity.type == metainfo.Datetime:
+                    search_quantity.es_mapping = Date()
+                elif isinstance(quantity.type, metainfo.Reference):
+                    inner_document = get_inner_document(quantity.type.target_section_def)
+                    search_quantity.es_mapping = Object(inner_document)
+                elif isinstance(quantity.type, metainfo.MEnum):
+                    search_quantity.es_mapping = Keyword()
+                else:
+                    raise NotImplementedError(
+                        'Quantity type %s for quantity %s is not supported.' % (quantity.type, quantity))
+
+            attrs[quantity.name] = search_quantity.es_mapping
+
+    document = type(document_name, (super_cls,), attrs)
+    _elastic_documents[section.qualified_name()] = document
+    return document
+
+
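
How a definition feeds this factory, as a hedged sketch; the section, the
a_search keyword for attaching the annotation (suggested by the m_x('search')
accessor above), and the metric argument are assumptions:

    class ExampleMetadata(metainfo.MSection):
        ''' Hypothetical section, only to show annotation-driven mappings. '''
        code_name = metainfo.Quantity(
            type=str,                   # str becomes a Keyword() mapping above
            a_search=SearchQuantity())  # read back via quantity.m_x('search')
        n_atoms = metainfo.Quantity(
            type=int,                               # int becomes Integer() above
            a_search=SearchQuantity(metric='sum'))  # also registered as a metric

    ExampleDocument = create_elastic_document(
        ExampleMetadata.m_def, document_name='ExampleDocument')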
+# TODO move to an init function that is triggered by the elastic setup in infrastructure
+Entry = cast(Document, create_elastic_document(
+    datamodel.EntryMetadata.m_def, document_name='Entry',
+    attrs=dict(Index=type('Index', (), dict(name=config.elastic.index_name)))))
+''' The elasticsearch_dsl Document class that constitutes the entry index. '''
+
+metrics_names = list(metrics.keys())
+''' Names of all available metrics '''
+
+for domain in datamodel.domains:
+    order_default_quantities.setdefault(domain, order_default_quantities.get('__all__'))
+    default_statistics.setdefault(domain, []).extend(default_statistics.get('__all__', []))
+
+
+# class User(InnerDoc):
+
+#     @classmethod
+#     def from_user(cls, user):
+#         self = cls(user_id=user.user_id)
+#         self.name = user.name
+#         self.email = user.email
+
+#         return self
+
+#     user_id = Keyword()
+#     email = Keyword()
+#     name = Text(fields={'keyword': Keyword()})
+
+
+# class Dataset(InnerDoc):
+
+#     @classmethod
+#     def from_dataset_id(cls, dataset_id):
+#         dataset = datamodel.Dataset.m_def.m_x('me').get(dataset_id=dataset_id)
+#         return cls(id=dataset.dataset_id, doi=dataset.doi, name=dataset.name, created=dataset.created)
+
+#     id = Keyword()
+#     doi = Keyword()
+#     name = Keyword()
+#     created = Date()
+
+
+# _domain_inner_doc_types: Dict[str, type] = {}
 
-    user_id = Keyword()
-    email = Keyword()
-    name = Text(fields={'keyword': Keyword()})
-
-
-class Dataset(InnerDoc):
-
-    @classmethod
-    def from_dataset_id(cls, dataset_id):
-        dataset = datamodel.Dataset.m_def.m_x('me').get(dataset_id=dataset_id)
-        return cls(id=dataset.dataset_id, doi=dataset.doi, name=dataset.name, created=dataset.created)
-
-    id = Keyword()
-    doi = Keyword()
-    name = Keyword()
-    created = Date()
-
-
-_domain_inner_doc_types: Dict[str, type] = {}
-
-
-class WithDomain(IndexMeta):
-    """ Override elasticsearch_dsl metaclass to sneak in domain specific mappings """
-    def __new__(cls, name, bases, attrs):
-        for domain in Domain.instances.values():
-            inner_doc_type = _domain_inner_doc_types.get(domain.name)
-            if inner_doc_type is None:
-                domain_attrs = {
-                    quantity.elastic_field: quantity.elastic_mapping
-                    for quantity in domain.domain_quantities.values()}
-
-                inner_doc_type = type(domain.name, (InnerDoc,), domain_attrs)
-                _domain_inner_doc_types[domain.name] = inner_doc_type
-
-            attrs[domain.name] = Object(inner_doc_type)
-
-        return super(WithDomain, cls).__new__(cls, name, bases, attrs)
-
-
-class Entry(Document, metaclass=WithDomain):
-
-    class Index:
-        name = config.elastic.index_name
-
-    domain = Keyword()
-    upload_id = Keyword()
-    upload_time = Date()
-    upload_name = Keyword()
-    calc_id = Keyword()
-    calc_hash = Keyword()
-    pid = Keyword()
-    raw_id = Keyword()
-    mainfile = Keyword()
-    files = Text(multi=True, analyzer=path_analyzer, fields={'keyword': Keyword()})
-    uploader = Object(User)
-
-    with_embargo = Boolean()
-    published = Boolean()
-
-    processed = Boolean()
-    last_processing = Date()
-    nomad_version = Keyword()
-    nomad_commit = Keyword()
-
-    authors = Object(User, multi=True)
-    owners = Object(User, multi=True)
-    comment = Text()
-    references = Keyword()
-    datasets = Object(Dataset)
-    external_id = Keyword()
-
-    atoms = Keyword()
-    only_atoms = Keyword()
-    formula = Keyword()
-
-    @classmethod
-    def from_calc_with_metadata(cls, source: datamodel.CalcWithMetadata) -> 'Entry':
-        entry = Entry(meta=dict(id=source.calc_id))
-        entry.update(source)
-        return entry
-
-    def update(self, source: datamodel.CalcWithMetadata) -> None:
-        self.domain = source.domain
-        self.upload_id = source.upload_id
-        self.upload_time = source.upload_time
-        self.upload_name = source.upload_name
-        self.calc_id = source.calc_id
-        self.calc_hash = source.calc_hash
-        self.pid = None if source.pid is None else str(source.pid)
-        self.raw_id = None if source.raw_id is None else str(source.raw_id)
-
-        self.processed = source.processed
-        self.last_processing = source.last_processing
-        self.nomad_version = source.nomad_version
-        self.nomad_commit = source.nomad_commit
-
-        self.mainfile = source.mainfile
-        if source.files is None:
-            self.files = [self.mainfile]
-        elif self.mainfile not in source.files:
-            self.files = [self.mainfile] + source.files
-        else:
-            self.files = source.files
 
-        self.with_embargo = bool(source.with_embargo)
-        self.published = source.published
+# class WithDomain(IndexMeta):
+#     ''' Override elasticsearch_dsl metaclass to sneak in domain specific mappings '''
+#     def __new__(cls, name, bases, attrs):
+#         for domain in Domain.instances.values():
+#             inner_doc_type = _domain_inner_doc_types.get(domain.name)
+#             if inner_doc_type is None:
+#                 domain_attrs = {
+#                     quantity.elastic_field: quantity.elastic_mapping
+#                     for quantity in domain.domain_quantities.values()}
+
+#                 inner_doc_type = type(domain.name, (InnerDoc,), domain_attrs)
+#                 _domain_inner_doc_types[domain.name] = inner_doc_type
+
+#             attrs[domain.name] = Object(inner_doc_type)
+
+#         return super(WithDomain, cls).__new__(cls, name, bases, attrs)
+
+
+# class Entry(Document, metaclass=WithDomain):
 
-        uploader = datamodel.User.get(user_id=source.uploader) if source.uploader is not None else None
-        authors = [datamodel.User.get(user_id) for user_id in source.coauthors]
-        owners = [datamodel.User.get(user_id) for user_id in source.shared_with]
-        if uploader is not None:
-            authors.append(uploader)
-            owners.append(uploader)
-        authors.sort(key=lambda user: user.last_name + ' ' + user.first_name)
-        owners.sort(key=lambda user: user.last_name + ' ' + user.first_name)
+#     class Index:
+#         name = config.elastic.index_name
 
-        self.uploader = User.from_user(uploader) if uploader is not None else None
-        self.authors = [User.from_user(user) for user in authors]
-        self.owners = [User.from_user(user) for user in owners]
+#     domain = Keyword()
+#     upload_id = Keyword()
+#     upload_time = Date()
+#     upload_name = Keyword()
+#     calc_id = Keyword()
+#     calc_hash = Keyword()
+#     pid = Keyword()
+#     raw_id = Keyword()
+#     mainfile = Keyword()
+#     files = Text(multi=True, analyzer=path_analyzer, fields={'keyword': Keyword()})
+#     uploader = Object(User)
+
+#     with_embargo = Boolean()
+#     published = Boolean()
+
+#     processed = Boolean()
+#     last_processing = Date()
+#     nomad_version = Keyword()
+#     nomad_commit = Keyword()
+
+#     authors = Object(User, multi=True)
+#     owners = Object(User, multi=True)
+#     comment = Text()
+#     references = Keyword()
+#     datasets = Object(Dataset)
+#     external_id = Keyword()
+
+#     atoms = Keyword()
+#     only_atoms = Keyword()
+#     formula = Keyword()
+
+#     @classmethod
+#     def from_entry_metadata(cls, source: datamodel.EntryMetadata) -> 'Entry':
+#         entry = Entry(meta=dict(id=source.calc_id))
+#         entry.update(source)
+#         return entry
+
+#     def update(self, source: datamodel.EntryMetadata) -> None:
+#         self.domain = source.domain
+#         self.upload_id = source.upload_id
+#         self.upload_time = source.upload_time
+#         self.upload_name = source.upload_name
+#         self.calc_id = source.calc_id
+#         self.calc_hash = source.calc_hash
+#         self.pid = None if source.pid is None else str(source.pid)
+#         self.raw_id = None if source.raw_id is None else str(source.raw_id)
+
+#         self.processed = source.processed
+#         self.last_processing = source.last_processing
+#         self.nomad_version = source.nomad_version
+#         self.nomad_commit = source.nomad_commit
+
+#         self.mainfile = source.mainfile
+#         if source.files is None:
+#             self.files = [self.mainfile]
+#         elif self.mainfile not in source.files:
+#             self.files = [self.mainfile] + source.files
+#         else:
+#             self.files = source.files
+
+#         self.with_embargo = bool(source.with_embargo)
+#         self.published = source.published
+
+#         uploader = datamodel.User.get(user_id=source.uploader) if source.uploader is not None else None
+#         authors = [datamodel.User.get(user_id) for user_id in source.coauthors]
+#         owners = [datamodel.User.get(user_id) for user_id in source.shared_with]
+#         if uploader is not None:
+#             authors.append(uploader)
+#             owners.append(uploader)
+#         authors.sort(key=lambda user: user.last_name + ' ' + user.first_name)
+#         owners.sort(key=lambda user: user.last_name + ' ' + user.first_name)
+
+#         self.uploader = User.from_user(uploader) if uploader is not None else None
+#         self.authors = [User.from_user(user) for user in authors]
+#         self.owners = [User.from_user(user) for user in owners]
+
+#         self.comment = source.comment
+#         self.references = source.references
+#         self.datasets = [Dataset.from_dataset_id(dataset_id) for dataset_id in source.datasets]
+#         self.external_id = source.external_id
+
+#         self.atoms = source.atoms
+#         self.only_atoms = nomad.datamodel.base.only_atoms(source.atoms)
+#         self.formula = source.formula
+#         self.n_atoms = source.n_atoms
+
+#         if self.domain is not None:
+#             inner_doc_type = _domain_inner_doc_types[self.domain]
+#             inner_doc = inner_doc_type()
+#             for quantity in Domain.instances[self.domain].domain_quantities.values():
+#                 quantity_value = quantity.elastic_value(getattr(source, quantity.metadata_field))
+#                 setattr(inner_doc, quantity.elastic_field, quantity_value)
+
+#             setattr(self, self.domain, inner_doc)
+
+
+def create_entry(section: metainfo.MSection) -> Any:
+    ''' Creates an elasticsearch_dsl document for the given section. '''
+    cls = _elastic_documents[section.m_def.qualified_name()]
+
+    if section.m_def == datamodel.EntryMetadata.m_def:
+        obj = cls(meta=dict(id=section.m_get(datamodel.EntryMetadata.calc_id)))
+    else:
+        obj = cls()
+
+    for quantity in section.m_def.all_quantities.values():
+        search_quantities = quantity.m_x('search')
+        if search_quantities is None:
+            continue
+
+        if not isinstance(search_quantities, list):
+            search_quantities = [search_quantities]
+
+        value = section.m_get(quantity)
+        if value is None or value == []:
+            continue
 
-        self.comment = source.comment
-        self.references = source.references
-        self.datasets = [Dataset.from_dataset_id(dataset_id) for dataset_id in source.datasets]
-        self.external_id = source.external_id
+        for i, search_quantity in enumerate(search_quantities):
+            if i != 0:
+                # The value is only written for the first search quantity
+                continue
 
-        self.atoms = source.atoms
-        self.only_atoms = nomad.datamodel.base.only_atoms(source.atoms)
-        self.formula = source.formula
-        self.n_atoms = source.n_atoms
+            quantity_type = quantity.type
+            if isinstance(quantity_type, metainfo.Reference):
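+                # Reference quantities are resolved by recursively mapping the
+                # referenced section(s) onto their own elasticsearch_dsl documents.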
+                if quantity.is_scalar:
+                    value = create_entry(cast(metainfo.MSection, value))
+                else:
+                    value = [create_entry(item) for item in value]
+
+            elif search_quantity.es_value is not None:
+                value = search_quantity.es_value(section)
 
-        if self.domain is not None:
-            inner_doc_type = _domain_inner_doc_types[self.domain]
-            inner_doc = inner_doc_type()
-            for quantity in Domain.instances[self.domain].domain_quantities.values():
-                quantity_value = quantity.elastic_value(getattr(source, quantity.metadata_field))
-                setattr(inner_doc, quantity.elastic_field, quantity_value)
+            setattr(obj, quantity.name, value)
 
-            setattr(self, self.domain, inner_doc)
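+    # Sub-sections marked for search become nested documents; repeating
+    # sub-sections are mapped onto lists of nested documents.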
+    for sub_section in section.m_def.all_sub_sections.values():
+        if not sub_section.m_x('search'):
+            continue
+
+        if sub_section.repeats:
+            mi_values = list(section.m_get_sub_sections(sub_section))
+            if len(mi_values) == 0:
+                continue
+            value = [create_entry(value) for value in mi_values]
+        else:
+            mi_value = section.m_get_sub_section(sub_section, -1)
+            if mi_value is None:
+                continue
+            value = create_entry(mi_value)
+
+        setattr(obj, sub_section.name, value)
+
+    return obj
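+
+# A minimal usage sketch (hypothetical id; assumes a configured elastic index),
+# mirroring how the tests index single entries:
+#
+#     entry_metadata = datamodel.EntryMetadata(domain='dft', calc_id='some_id')
+#     create_entry(entry_metadata).save(refresh=True)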
 
 
 def delete_upload(upload_id):
-    """ Delete all entries with given ``upload_id`` from the index. """
+    ''' Delete all entries with given ``upload_id`` from the index. '''
     index = Entry._default_index()
     Search(index=index).query('match', upload_id=upload_id).delete()
 
 
 def delete_entry(calc_id):
-    """ Delete the entry with the given ``calc_id`` from the index. """
+    ''' Delete the entry with the given ``calc_id`` from the index. '''
     index = Entry._default_index()
     Search(index=index).query('match', calc_id=calc_id).delete()
 
 
-def publish(calcs: Iterable[datamodel.CalcWithMetadata]) -> None:
-    """ Update all given calcs with their metadata and set ``publish = True``. """
+def publish(calcs: Iterable[datamodel.EntryMetadata]) -> None:
+    ''' Update all given calcs with their metadata and set ``publish = True``. '''
     def elastic_updates():
         for calc in calcs:
-            entry = Entry.from_calc_with_metadata(calc)
+            entry = create_entry(calc)
             entry.published = True
             entry = entry.to_dict(include_meta=True)
             source = entry.pop('_source')
@@ -222,16 +421,16 @@ def publish(calcs: Iterable[datamodel.CalcWithMetadata]) -> None:
     refresh()
 
 
-def index_all(calcs: Iterable[datamodel.CalcWithMetadata], do_refresh=True) -> None:
-    """
+def index_all(calcs: Iterable[datamodel.EntryMetadata], do_refresh=True) -> None:
+    '''
     Adds all given calcs with their metadata to the index.
 
     Returns:
         Number of failed entries.
-    """
+    '''
     def elastic_updates():
         for calc in calcs:
-            entry = Entry.from_calc_with_metadata(calc)
+            entry = create_entry(calc)
             entry = entry.to_dict(include_meta=True)
             entry['_op_type'] = 'index'
             yield entry
@@ -248,36 +447,6 @@ def refresh():
     infrastructure.elastic_client.indices.refresh(config.elastic.index_name)
 
 
-metrics = {
-    metric_name: metric
-    for domain in Domain.instances.values()
-    for metric_name, metric in domain.metrics.items()}
-"""
-The available search metrics. Metrics are integer values given for each entry that can
-be used in statistics (aggregations), e.g. the sum of all total energy calculations or cardinality of
-all unique geometries.
-"""
-
-metrics_names = [metric_name for domain in Domain.instances.values() for metric_name in domain.metrics_names]
-""" Names of all available metrics """
-
-groups = {
-    key: value
-    for domain in Domain.instances.values()
-    for key, value in domain.groups.items()}
-"""The available groupable quantities"""
-
-order_default_quantities = {
-    domain_name: domain.order_default_quantity
-    for domain_name, domain in Domain.instances.items()
-}
-
-default_statistics = {
-    domain_name: domain.default_statistics
-    for domain_name, domain in Domain.instances.items()
-}
-
-
 class SearchRequest:
     '''
     Represents a search request and allows to execute that request.
@@ -313,10 +482,10 @@ class SearchRequest:
         self._search = Search(index=config.elastic.index_name)
 
     def domain(self, domain: str = None):
-        """
+        '''
         Applies the domain of this request to the query. Allows to optionally update
         the domain of this request.
-        """
+        '''
         if domain is not None:
             self._domain = domain
 
@@ -324,7 +493,7 @@ class SearchRequest:
         return self
 
     def owner(self, owner_type: str = 'all', user_id: str = None):
-        """
+        '''
         Uses the query part of the search to restrict the results based on the owner.
         The possible types are: ``all`` for all calculations; ``public`` for
         calculations visible by everyone, excluding embargo-ed entries and entries only visible
@@ -340,7 +509,7 @@ class SearchRequest:
             KeyError: If the given owner_type is not supported
             ValueError: If the owner_type requires a user but none is given, or the
                 given user is not allowed to use the given owner_type.
-        """
+        '''
         if owner_type == 'all':
             q = Q('term', published=True)
             if user_id is not None:
@@ -378,31 +547,31 @@ class SearchRequest:
         return self
 
     def search_parameters(self, **kwargs):
-        """
+        '''
         Configures the existing query with additional search parameters. Kwargs are
         interpreted as key value pairs. Keys have to correspond to valid entry quantities
         in the domain's (DFT calculations) datamodel. Alternatively search parameters
         can be set via attributes.
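+
+        A usage sketch (hypothetical values)::
+
+            request.search_parameters(formula='H2O', atoms=['H', 'O'])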
-        """
+        '''
         for name, value in kwargs.items():
             self.search_parameter(name, value)
 
         return self
 
     def search_parameter(self, name, value):
-        quantity = Domain.get_quantity(name)
+        quantity = search_quantities[name]
 
-        if quantity.multi and not isinstance(value, list):
+        if quantity.many and not isinstance(value, list):
             value = [value]
 
-        value = quantity.elastic_value(value)
+        if quantity.many_or and isinstance(value, list):
+            self.q &= Q('terms', **{quantity.es_quantity: value})
+            return self
 
-        if quantity.elastic_search_type == 'terms':
-            if not isinstance(value, list):
+        if quantity.derived:
+            if quantity.many and not isinstance(value, list):
                 value = [value]
-            self.q &= Q('terms', **{quantity.qualified_elastic_field: value})
-
-            return self
+            value = quantity.derived(value)
 
         if isinstance(value, list):
             values = value
@@ -410,18 +579,18 @@ class SearchRequest:
             values = [value]
 
         for item in values:
-            self.q &= Q(quantity.elastic_search_type, **{quantity.qualified_elastic_field: item})
+            self.q &= Q('match', **{quantity.es_quantity: item})
 
         return self
 
     def query(self, query):
+        ''' Adds the given query as an 'and' (i.e. 'must') clause to the request. '''
+        ''' Adds the given query as a 'and' (i.e. 'must') clause to the request. '''
         self._query &= query
 
         return self
 
     def time_range(self, start: datetime, end: datetime):
-        """ Adds a time range to the query. """
+        ''' Adds a time range to the query. '''
         if start is None and end is None:
             return self
 
@@ -436,7 +605,7 @@ class SearchRequest:
 
     @property
     def q(self):
-        """ The underlying elasticsearch_dsl query object """
+        ''' The underlying elasticsearch_dsl query object '''
         if self._query is None:
             return Q('match_all')
         else:
@@ -447,30 +616,30 @@ class SearchRequest:
         self._query = q
 
     def totals(self, metrics_to_use: List[str] = []):
-        """
+        '''
         Configure the request to return overall totals for the given metrics.
 
         The statistics are returned with the other quantity statistics under the pseudo
         quantity name 'total'. 'total' contains the pseudo value 'all'. It is used to
         store the metrics aggregated over all entries in the search results.
-        """
+        '''
         self._add_metrics(self._search.aggs, metrics_to_use)
         return self
 
     def default_statistics(self, metrics_to_use: List[str] = []):
-        """
+        '''
         Configures the domain's default statistics.
-        """
-        for name in default_statistics[self._domain]:
+        '''
+        for search_quantity in default_statistics[self._domain]:
             self.statistic(
-                name,
-                Domain.get_quantity(name).aggregations,
+                search_quantity.qualified_name,
+                search_quantity.statistic_size,
                 metrics_to_use=metrics_to_use)
 
         return self
 
     def statistic(self, quantity_name: str, size: int, metrics_to_use: List[str] = []):
-        """
+        '''
         This can be used to display statistics over the searched entries and allows to
         implement faceted search on the top values for each quantity.
 
@@ -493,9 +662,9 @@ class SearchRequest:
             metrics_to_use: The metrics calculated over the aggregations. Can be
                 ``unique_code_runs``, ``datasets``, other domain specific metrics.
                 The basic doc_count metric ``code_runs`` is always given.
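+
+        A usage sketch (assuming a ``dft.code_name`` search quantity and the
+        ``datasets`` metric)::
+
+            request.statistic('dft.code_name', size=20, metrics_to_use=['datasets'])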
-        """
-        quantity = Domain.get_quantity(quantity_name)
-        terms = A('terms', field=quantity.qualified_elastic_field, size=size, order=dict(_key='asc'))
+        '''
+        quantity = search_quantities[quantity_name]
+        terms = A('terms', field=quantity.es_quantity, size=size, order=dict(_key='asc'))
 
         buckets = self._search.aggs.bucket('statistics:%s' % quantity_name, terms)
         self._add_metrics(buckets, metrics_to_use)
@@ -507,24 +676,26 @@ class SearchRequest:
             parent = self._search.aggs
 
         for metric in metrics_to_use:
-            quantity, metric_kind = metrics[metric]
-            field = Domain.get_quantity(quantity).elastic_field
-            parent.metric('metric:%s' % metric, A(metric_kind, field=field))
+            metric_quantity = metrics[metric]
+            field = metric_quantity.es_quantity
+            parent.metric(
+                'metric:%s' % metric_quantity.metric_name,
+                A(metric_quantity.metric, field=field))
 
     def date_histogram(self, metrics_to_use: List[str] = []):
-        """
+        '''
         Adds a date histogram on the given metrics to the statistics part.
-        """
+        '''
         histogram = A('date_histogram', field='upload_time', interval='1M', format='yyyy-MM-dd')
         self._add_metrics(self._search.aggs.bucket('statistics:date_histogram', histogram), metrics_to_use)
 
         return self
 
     def quantities(self, **kwargs):
-        """
+        '''
         Shorthand for adding multiple quantities. See :func:`quantity`. Keyword argument
         keys are quantity names, values are tuples of size and after value.
-        """
+        '''
         for name, spec in kwargs.items():
             size, after = spec
             self.quantity(name, after=after, size=size)
@@ -534,7 +705,7 @@ class SearchRequest:
     def quantity(
             self, name, size=100, after=None, examples=0, examples_source=None,
             order_by: str = None, order: str = 'desc'):
-        """
+        '''
         Adds a request for values of the given quantity.
         It allows to scroll through all values via elasticsearch's
         composite aggregations. The response will contain the quantity values and
@@ -564,12 +735,12 @@ class SearchRequest:
                 value bucket is used.
             order:
                 "desc" or "asc"
-        """
+        '''
         if size is None:
             size = 100
 
-        quantity = Domain.get_quantity(name)
-        terms = A('terms', field=quantity.qualified_elastic_field)
+        quantity = search_quantities[name]
+        terms = A('terms', field=quantity.es_quantity)
 
         # We are using elasticsearch's 'composite aggregations' here. We do not really
         # compose aggregations, but only those pseudo composites allow us to use the
@@ -597,36 +768,36 @@ class SearchRequest:
         return self
 
     def exclude(self, *args):
-        """ Exclude certain elastic fields from the search results. """
+        ''' Exclude certain elastic fields from the search results. '''
         self._search = self._search.source(excludes=args)
         return self
 
     def include(self, *args):
-        """ Include only the given fields in the search results. """
+        ''' Include only the given fields in the search results. '''
         self._search = self._search.source(includes=args)
         return self
 
     def execute(self):
-        """
+        '''
         Executes without returning actual results. Only makes sense if the request
         was configured for statistics or quantity values.
-        """
+        '''
         return self._response(self._search.query(self.q)[0:0].execute())
 
     def execute_scan(self, order_by: str = None, order: int = -1, **kwargs):
-        """
+        '''
         This executes the search as a scan. The result will be a generator over the found
         entries. Everything but the query part of this object will be ignored.
-        """
+        '''
         search = self._search.query(self.q)
 
         if order_by is not None:
-            order_by_quantity = Domain.get_quantity(order_by)
+            order_by_quantity = search_quantities[order_by]
 
             if order == 1:
-                search = search.sort(order_by_quantity.qualified_elastic_field)
+                search = search.sort(order_by_quantity.es_quantity)
             else:
-                search = search.sort('-%s' % order_by_quantity.qualified_elastic_field)
+                search = search.sort('-%s' % order_by_quantity.es_quantity)
 
             search = search.params(preserve_order=True)
 
@@ -636,7 +807,7 @@ class SearchRequest:
     def execute_paginated(
             self, page: int = 1, per_page=10, order_by: str = None,
             order: int = -1):
-        """
+        '''
         Executes the search and returns paginated results. Those are sorted.
 
         Arguments:
@@ -644,21 +815,22 @@ class SearchRequest:
             per_page: The number of entries per page.
             order_by: The quantity to order by.
             order: -1 or 1 for descending or ascending order.
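+
+        A usage sketch (hypothetical)::
+
+            data = request.execute_paginated(page=1, per_page=10)
+            total = data['pagination']['total']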
-        """
+        '''
         if order_by is None:
-            order_by = order_default_quantities[self._domain]
+            order_by_quantity = order_default_quantities[self._domain]
+        else:
+            order_by_quantity = search_quantities[order_by]
 
         search = self._search.query(self.q)
 
-        order_by_quantity = Domain.get_quantity(order_by)
-
         if order == 1:
-            search = search.sort(order_by_quantity.qualified_elastic_field)
+            search = search.sort(order_by_quantity.es_quantity)
         else:
-            search = search.sort('-%s' % order_by_quantity.qualified_elastic_field)
+            search = search.sort('-%s' % order_by_quantity.es_quantity)
         search = search[(page - 1) * per_page: page * per_page]
 
         es_result = search.execute()
+
         result = self._response(es_result, with_hits=True)
 
         result.update(pagination=dict(total=result['total'], page=page, per_page=per_page))
@@ -667,7 +839,7 @@ class SearchRequest:
     def execute_scrolled(
             self, scroll_id: str = None, size: int = 1000, scroll: str = u'5m',
             order_by: str = None, order: int = -1):
-        """
+        '''
         Executes a scrolling search based on the ES scroll API. Pagination is replaced with
         scrolling; no ordering, no statistics, and no quantities will be provided.
 
@@ -687,7 +859,7 @@ class SearchRequest:
                 to this method) in ES time units. Default is 5 minutes.
 
         TODO support order and order_by
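+
+        A scroll-loop sketch, assuming the scroll info carries the id under
+        ``scroll_id``::
+
+            response = request.execute_scrolled()
+            while len(response['results']) > 0:
+                response = request.execute_scrolled(
+                    scroll_id=response['scroll']['scroll_id'])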
-        """
+        '''
         es = infrastructure.elastic_client
 
         if scroll_id is None:
@@ -726,11 +898,11 @@ class SearchRequest:
         return dict(scroll=scroll_info, results=results)
 
     def _response(self, response, with_hits: bool = False) -> Dict[str, Any]:
-        """
+        '''
         Prepares a response object covering the total number of results, hits, statistics,
         and quantities. Other aspects like pagination and scrolling have to be added
         elsewhere.
-        """
+        '''
         result: Dict[str, Any] = dict()
         aggs = response.aggregations.to_dict()
 
@@ -809,24 +981,25 @@ class SearchRequest:
 
 
 def to_calc_with_metadata(results: List[Dict[str, Any]]):
-    """ Translates search results into :class:`CalcWithMetadata` objects read from mongo. """
+    ''' Translates search results into :class:`EntryMetadata` objects read from mongo. '''
     ids = [result['calc_id'] for result in results]
     return [
-        datamodel.CalcWithMetadata(**calc.metadata)
+        datamodel.EntryMetadata.m_from_dict(calc.metadata)
         for calc in proc.Calc.objects(calc_id__in=ids)]
 
 
 def flat(obj, prefix=None):
-    """
+    '''
     Helper that translates nested result objects into flattened dicts with
     ``domain.quantity`` as keys.
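+
+    For example (illustrative): ``flat({'dft': {'code_name': 'VASP'}})`` yields
+    ``{'dft.code_name': 'VASP'}``.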
-    """
+    '''
     if isinstance(obj, dict):
         result = {}
         for key, value in obj.items():
             if isinstance(value, dict):
+                value = flat(value)
                 for child_key, child_value in value.items():
-                    result['%s.%s' % (key, child_key)] = flat(child_value)
+                    result['%s.%s' % (key, child_key)] = child_value
 
             else:
                 result[key] = value
diff --git a/nomad/utils.py b/nomad/utils.py
index 1cdb8c933b..c2a71853c3 100644
--- a/nomad/utils.py
+++ b/nomad/utils.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 .. autofunc::nomad.utils.create_uuid
 .. autofunc::nomad.utils.hash
 .. autofunc::nomad.utils.timer
@@ -31,7 +31,7 @@ Depending on the configuration all logs will also be send to a central logstash.
 .. autofunc::nomad.utils.create_uuid
 .. autofunc::nomad.utils.timer
 .. autofunc::nomad.utils.lnr
-"""
+'''
 
 from typing import List
 import base64
@@ -53,7 +53,7 @@ from datetime import timedelta
 from nomad import config
 
 default_hash_len = 28
-""" Length of hashes and hash-based ids (e.g. calc, upload) in nomad. """
+''' Length of hashes and hash-based ids (e.g. calc, upload) in nomad. '''
 
 
 def decode_handle_id(handle_str: str):
@@ -73,7 +73,7 @@ def decode_handle_id(handle_str: str):
 
 
 def hash(*args, length: int = default_hash_len) -> str:
-    """ Creates a websave hash of the given length based on the repr of the given arguments. """
+    ''' Creates a web-safe hash of the given length based on the repr of the given arguments. '''
     hash = hashlib.sha512()
     for arg in args:
         hash.update(str(arg).encode('utf-8'))
@@ -82,7 +82,7 @@ def hash(*args, length: int = default_hash_len) -> str:
 
 
 def make_websave(hash, length: int = default_hash_len) -> str:
-    """ Creates a websave string for a hashlib hash object. """
+    ''' Creates a web-safe string for a hashlib hash object. '''
     if length > 0:
         return base64.b64encode(hash.digest(), altchars=b'-_')[:length].decode('utf-8')
     else:
@@ -90,30 +90,30 @@ def make_websave(hash, length: int = default_hash_len) -> str:
 
 
 def base64_encode(string):
-    """
+    '''
     Removes any `=` used as padding from the encoded string.
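+
+    For example (illustrative): ``base64_encode(b'ab')`` returns ``'YWI'``
+    instead of the padded ``'YWI='``.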
-    """
+    '''
     encoded = base64.urlsafe_b64encode(string).decode('utf-8')
     return encoded.rstrip("=")
 
 
 def base64_decode(string):
-    """
+    '''
     Adds back in the required padding before decoding.
-    """
+    '''
     padding = 4 - (len(string) % 4)
     bytes = (string + ("=" * padding)).encode('utf-8')
     return base64.urlsafe_b64decode(bytes)
 
 
 def sanitize_logevent(event: str) -> str:
-    """
+    '''
     Prepares a log event or message for analysis in the elastic stack. It removes numbers,
     lists, and matrices of numbers from the event string and limits its size. The
     goal is to make it easier to define aggregations over events by using event
     strings as representatives for event classes rather than event instances (with
     concrete numbers, etc).
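+
+    For example (illustrative): ``'eigenvalues [1.2, 3.4]'`` becomes
+    ``'eigenvalues L'`` and ``'total energy 1.5'`` becomes ``'total energy X'``.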
-    """
+    '''
     sanitized_event = event[:120]
     sanitized_event = re.sub(r'(\d*\.\d+|\d+(\.\d*)?)', 'X', sanitized_event)
     sanitized_event = re.sub(r'((\[|\()\s*)?X\s*(,\s*X)+(\s*(\]|\)))?', 'L', sanitized_event)
@@ -123,7 +123,7 @@ def sanitize_logevent(event: str) -> str:
 
 @contextmanager
 def legacy_logger(logger):
-    """ Context manager that makes the given logger the logger for legacy log entries. """
+    ''' Context manager that makes the given logger the logger for legacy log entries. '''
     LogstashHandler.legacy_logger = logger
     try:
         yield
@@ -132,14 +132,14 @@ def legacy_logger(logger):
 
 
 class LogstashHandler(logstash.TCPLogstashHandler):
-    """
+    '''
     A log handler that emits records to logstash. It also filters logs for being
     structlog entries. All other entries are diverted to a global `legacy_logger`.
     This legacy logger is supposed to be a structlog logger that turns legacy
     records into structlog entries with reasonable binds depending on the current
     execution context (e.g. parsing/normalizing, etc.). If no legacy logger is
     set, they get emitted as usual (e.g. non-nomad logs, celery, dbs, etc.)
-    """
+    '''
 
     legacy_logger = None
 
@@ -349,15 +349,15 @@ def configure_logging():
 
 
 def create_uuid() -> str:
-    """ Returns a web-save base64 encoded random uuid (type 4). """
+    ''' Returns a web-safe base64 encoded random uuid (type 4). '''
     return base64.b64encode(uuid.uuid4().bytes, altchars=b'-_').decode('utf-8')[0:-2]
 
 
 def get_logger(name, **kwargs):
-    """
+    '''
     Returns a structlog logger that already has a logstash handler attached.
     Use additional *kwargs* to pre-bind some values to all events.
-    """
+    '''
     if name.startswith('nomad.'):
         name = '.'.join(name.split('.')[:2])
 
@@ -367,14 +367,14 @@ def get_logger(name, **kwargs):
 
 @contextmanager
 def lnr(logger, event, **kwargs):
-    """
+    '''
     A context manager that Logs aNd Raises all exceptions with the given logger.
 
     Arguments:
         logger: The logger that should be used for logging exceptions.
         event: the log message
         **kwargs: additional properties for the structured log
-    """
+    '''
     try:
         yield
     except HTTPException as e:
@@ -387,7 +387,7 @@ def lnr(logger, event, **kwargs):
 
 @contextmanager
 def timer(logger, event, method='info', **kwargs):
-    """
+    '''
     A context manager that takes execution time and produces a log entry with said time.
 
     Arguments:
@@ -399,7 +399,7 @@ def timer(logger, event, method='info', **kwargs):
 
     Returns:
         The method yields a dictionary that can be used to add further log data.
-    """
+    '''
     start = time.time()
 
     try:
@@ -441,15 +441,15 @@ def to_tuple(self, *args):
 
 
 def chunks(list, n):
-    """ Chunks up the given list into parts of size n. """
+    ''' Chunks up the given list into parts of size n. '''
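+    # e.g. list(chunks([1, 2, 3, 4, 5], 2)) yields [[1, 2], [3, 4], [5]]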
     for i in range(0, len(list), n):
         yield list[i:i + n]
 
 
 class POPO(dict):
-    """
+    '''
     A dict subclass that uses attributes as key/value pairs.
-    """
+    '''
     def __init__(self, **kwargs):
         super().__init__(**kwargs)
 
@@ -470,10 +470,10 @@ class POPO(dict):
 
 
 class SleepTimeBackoff:
-    """
+    '''
     Provides increasingly larger sleeps. Useful when
     observing long-running processes with unknown runtime.
-    """
+    '''
 
     def __init__(self, start_time: float = 0.1, max_time: float = 5):
         self.current_time = start_time
@@ -517,10 +517,10 @@ class ETA:
 
 
 def common_prefix(paths):
-    """
+    '''
     Computes the longest common file path prefix (with respect to '/' separated segments).
     Returns an empty string if no common prefix exists.
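+
+    For example (illustrative): ``common_prefix(['a/b/x', 'a/b/y'])`` returns ``'a/b/'``.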
-    """
+    '''
     common_prefix = None
 
     for path in paths:
diff --git a/tests/__init__.py b/tests/__init__.py
index e48f987ae9..05d19b7d8a 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 The nomad@FAIRDI tests are based on the pytest library. Pytest uses *fixtures* to
 modularize setup and teardown of mocks, infrastructure, and other context objects.
 The following depicts the used hierarchy of fixtures:
@@ -20,7 +20,7 @@ The following depicts the used hierarchy of fixtures:
 .. image:: test_fixtures.png
 
 Otherwise the test submodules follow the names of the nomad code modules.
-"""
+'''
 
 from nomad import config
 
diff --git a/tests/app/resource.py b/tests/app/resource.py
index 877031f9df..7215b4bee7 100644
--- a/tests/app/resource.py
+++ b/tests/app/resource.py
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 API endpoints that cause various scenarios to test general API aspects like logging,
 error handling, etc.
-"""
+'''
 
 from flask_restplus import Resource
 
diff --git a/tests/app/test_api.py b/tests/app/test_api.py
index 041825488e..5d37a08747 100644
--- a/tests/app/test_api.py
+++ b/tests/app/test_api.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Any
+from typing import Any, Iterable
 import pytest
 import time
 import json
@@ -30,7 +30,7 @@ from nomad.app.api.auth import generate_upload_token
 from nomad import search, parsing, files, config, utils, infrastructure
 from nomad.files import UploadFiles, PublicUploadFiles
 from nomad.processing import Upload, Calc, SUCCESS
-from nomad.datamodel import UploadWithMetadata, CalcWithMetadata, User, Dataset
+from nomad.datamodel import EntryMetadata, User, Dataset
 
 from tests.conftest import create_auth_headers, clear_elastic, create_test_structure
 from tests.test_files import example_file, example_file_mainfile, example_file_contents
@@ -56,12 +56,11 @@ def test_user_signature_token(api, test_user_auth):
     return json.loads(rv.data)['signature_token']
 
 
-def get_upload_with_metadata(upload: dict) -> UploadWithMetadata:
-    """ Create a :class:`UploadWithMetadata` from a API upload json record. """
-    return UploadWithMetadata(
-        upload_id=upload['upload_id'], calcs=[
-            CalcWithMetadata(domain='dft', calc_id=calc['calc_id'], mainfile=calc['mainfile'])
-            for calc in upload['calcs']['results']])
+def get_upload_entries_metadata(upload: dict) -> Iterable[EntryMetadata]:
+    ''' Create an iterable of :class:`EntryMetadata` from an API upload JSON record. '''
+    return [
+        EntryMetadata(domain='dft', calc_id=entry['calc_id'], mainfile=entry['mainfile'])
+        for entry in upload['calcs']['results']]
 
 
 def assert_zip_file(rv, files: int = -1, basename: bool = None):
@@ -233,16 +232,14 @@ class TestUploads:
             upload = self.assert_upload(rv.data)
             assert len(upload['calcs']['results']) == 1
 
-        upload_with_metadata = get_upload_with_metadata(upload)
-        assert_upload_files(upload_with_metadata, files.StagingUploadFiles)
-        assert_search_upload(upload_with_metadata, additional_keys=['atoms', 'dft.system'])
+        entries = get_upload_entries_metadata(upload)
+        assert_upload_files(upload_id, entries, files.StagingUploadFiles)
+        assert_search_upload(entries, additional_keys=['atoms', 'dft.system'])
 
     def assert_published(self, api, test_user_auth, upload_id, proc_infra, metadata={}):
         rv = api.get('/uploads/%s' % upload_id, headers=test_user_auth)
         upload = self.assert_upload(rv.data)
 
-        upload_with_metadata = get_upload_with_metadata(upload)
-
         rv = api.post(
             '/uploads/%s' % upload_id,
             headers=test_user_auth,
@@ -263,10 +260,22 @@ class TestUploads:
         assert upload_proc is not None
         assert upload_proc.published is True
         assert upload_proc.embargo_length == min(36, metadata.get('embargo_length', 36))
-        upload_with_metadata = upload_proc.to_upload_with_metadata()
+        entries = upload_proc.entries_metadata()
 
-        assert_upload_files(upload_with_metadata, files.PublicUploadFiles, published=True)
-        assert_search_upload(upload_with_metadata, additional_keys=additional_keys, published=True)
+        for entry in entries:
+            for key, transform in {
+                    'comment': lambda e: e.comment,
+                    'with_embargo': lambda e: e.with_embargo,
+                    'references': lambda e: e.references,
+                    'coauthors': lambda e: [u.user_id for u in e.coauthors],
+                    '_uploader': lambda e: e.uploader.user_id,
+                    '_pid': lambda e: e.pid,
+                    'external_id': lambda e: e.external_id}.items():
+                if key in metadata:
+                    assert transform(entry) == metadata[key], key
+
+        assert_upload_files(upload_id, entries, files.PublicUploadFiles, published=True)
+        assert_search_upload(entries, additional_keys=additional_keys, published=True)
 
     def block_until_completed(self, api, upload_id: str, test_user_auth):
         while True:
@@ -504,6 +513,7 @@ class TestUploads:
 
 
 today = datetime.datetime.utcnow().date()
+today_datetime = datetime.datetime(*today.timetuple()[:6])
 
 
 class UploadFilesBasedTests:
@@ -590,9 +600,9 @@ class UploadFilesBasedTests:
         calc_specs = 'r' if restricted else 'p'
         Upload.create(user=test_user, upload_id='test_upload')
         if in_staging:
-            _, upload_files = create_staging_upload('test_upload', calc_specs=calc_specs)
+            _, _, upload_files = create_staging_upload('test_upload', calc_specs=calc_specs)
         else:
-            _, upload_files = create_public_upload('test_upload', calc_specs=calc_specs)
+            _, _, upload_files = create_public_upload('test_upload', calc_specs=calc_specs)
 
         yield 'test_upload', authorized, auth_headers
 
@@ -697,34 +707,35 @@ class TestRepo():
             dataset_id='ds_id', name='ds_name', user_id=test_user.user_id, doi='ds_doi')
         example_dataset.m_x('me').create()
 
-        calc_with_metadata = CalcWithMetadata(
-            domain='dft', upload_id='example_upload_id', calc_id='0', upload_time=today)
-        calc_with_metadata.files = ['test/mainfile.txt']
-        calc_with_metadata.apply_domain_metadata(normalized)
+        entry_metadata = EntryMetadata(
+            domain='dft', upload_id='example_upload_id', calc_id='0', upload_time=today_datetime)
+        entry_metadata.files = ['test/mainfile.txt']
+        entry_metadata.apply_domain_metadata(normalized)
 
-        calc_with_metadata.update(datasets=[example_dataset.dataset_id])
+        entry_metadata.m_update(datasets=[example_dataset.dataset_id])
 
-        calc_with_metadata.update(
+        entry_metadata.m_update(
             calc_id='1', uploader=test_user.user_id, published=True, with_embargo=False)
-        search.Entry.from_calc_with_metadata(calc_with_metadata).save(refresh=True)
+        search.create_entry(entry_metadata).save(refresh=True)
 
-        calc_with_metadata.update(
+        entry_metadata.m_update(
             calc_id='2', uploader=other_test_user.user_id, published=True,
-            with_embargo=False, pid=2, upload_time=today - datetime.timedelta(days=5),
+            with_embargo=False, pid=2, upload_time=today_datetime - datetime.timedelta(days=5),
             external_id='external_2')
-        calc_with_metadata.update(
-            atoms=['Fe'], comment='this is a specific word', formula='AAA', basis_set='zzz')
-        search.Entry.from_calc_with_metadata(calc_with_metadata).save(refresh=True)
+        entry_metadata.m_update(
+            atoms=['Fe'], comment='this is a specific word', formula='AAA')
+        entry_metadata.dft.basis_set = 'zzz'
+        search.create_entry(entry_metadata).save(refresh=True)
 
-        calc_with_metadata.update(
+        entry_metadata.m_update(
             calc_id='3', uploader=other_test_user.user_id, published=False,
             with_embargo=False, pid=3, external_id='external_3')
-        search.Entry.from_calc_with_metadata(calc_with_metadata).save(refresh=True)
+        search.create_entry(entry_metadata).save(refresh=True)
 
-        calc_with_metadata.update(
+        entry_metadata.m_update(
             calc_id='4', uploader=other_test_user.user_id, published=True,
             with_embargo=True, pid=4, external_id='external_4')
-        search.Entry.from_calc_with_metadata(calc_with_metadata).save(refresh=True)
+        search.create_entry(entry_metadata).save(refresh=True)
 
         yield
 
@@ -780,28 +791,27 @@ class TestRepo():
         assert rv.status_code == 404
 
     def test_search_datasets(self, api, example_elastic_calcs, no_warn, other_test_user_auth):
-        rv = api.get('/repo/?owner=all&datasets=true', headers=other_test_user_auth)
+        rv = api.get('/repo/?owner=all&group_datasets=true', headers=other_test_user_auth)
         data = self.assert_search(rv, 4)
 
         datasets = data.get('datasets', None)
         assert datasets is not None
         values = datasets['values']
         assert values['ds_id']['total'] == 4
-        assert values['ds_id']['examples'][0]['datasets'][0]['id'] == 'ds_id'
+        assert values['ds_id']['examples'][0]['datasets'][0]['dataset_id'] == 'ds_id'
         assert 'after' in datasets
         assert 'datasets' in data['statistics']['total']['all']
         assert data['statistics']['total']['all']['datasets'] > 0
 
     def test_search_uploads(self, api, example_elastic_calcs, no_warn, other_test_user_auth):
-        rv = api.get('/repo/?owner=all&uploads=true', headers=other_test_user_auth)
+        rv = api.get('/repo/?owner=all&group_uploads=true', headers=other_test_user_auth)
         data = self.assert_search(rv, 4)
 
         uploads = data.get('uploads', None)
         assert uploads is not None
         values = uploads['values']
-        # the 4 uploads have "example upload id", but 3 have newer upload time. Therefore,
-        # only 3 calc will be in the last (and therefore used) bucket of 'example_upload_id'.
-        assert values['example_upload_id']['total'] == 3
+
+        assert values['example_upload_id']['total'] == 4
         assert values['example_upload_id']['examples'][0]['upload_id'] == 'example_upload_id'
         assert 'after' in uploads
         assert 'uploads' in data['statistics']['total']['all']
@@ -930,10 +940,10 @@ class TestRepo():
     def test_search_aggregation_metrics(self, api, example_elastic_calcs, no_warn, metrics):
         rv = api.get('/repo/?%s' % urlencode({
             'metrics': metrics,
-            'statistics': True,
-            'dft.groups': True,
-            'datasets': True,
-            'uploads': True}, doseq=True))
+            'group_statistics': True,
+            'group_dft.groups': True,
+            'group_datasets': True,
+            'group_uploads': True}, doseq=True))
 
         assert rv.status_code == 200
         data = json.loads(rv.data)
@@ -1169,10 +1179,10 @@ class TestEditRepo():
             create_test_structure(meta_info, id, 2, 1, [], 0, metadata=metadata)
 
         entries = [
-            dict(calc_id='1', upload_id='upload_1', user=test_user, published=True, embargo=False),
-            dict(calc_id='2', upload_id='upload_2', user=test_user, published=True, embargo=True),
-            dict(calc_id='3', upload_id='upload_2', user=test_user, published=False, embargo=False),
-            dict(calc_id='4', upload_id='upload_3', user=other_test_user, published=True, embargo=False)]
+            dict(calc_id='1', upload_id='upload_1', user=test_user, published=True, with_embargo=False),
+            dict(calc_id='2', upload_id='upload_2', user=test_user, published=True, with_embargo=True),
+            dict(calc_id='3', upload_id='upload_2', user=test_user, published=False, with_embargo=False),
+            dict(calc_id='4', upload_id='upload_3', user=other_test_user, published=True, with_embargo=False)]
 
         i = 0
         for entry in entries:
@@ -1253,6 +1263,7 @@ class TestEditRepo():
             shared_with=[other_test_user.user_id])
         rv = self.perform_edit(**edit_data, query=dict(upload_id='upload_1'))
         result = json.loads(rv.data)
+        assert rv.status_code == 200
         actions = result.get('actions')
         for key in edit_data:
             assert key in actions
@@ -1393,7 +1404,7 @@ def test_edit_lift_embargo(api, published, other_test_user_auth):
                 }
             }
         }))
-    assert rv.status_code == 200
+    assert rv.status_code == 200, rv.data
     assert not Calc.objects(calc_id=example_calc.calc_id).first().metadata['with_embargo']
 
     Upload.get(published.upload_id).block_until_complete()
@@ -1780,13 +1791,13 @@ class TestDataset:
 
     @pytest.fixture()
     def example_dataset_with_entry(self, mongo, elastic, example_datasets):
-        calc = CalcWithMetadata(
+        entry_metadata = EntryMetadata(
             domain='dft', calc_id='1', upload_id='1', published=True, with_embargo=False,
             datasets=['1'])
         Calc(
             calc_id='1', upload_id='1', create_time=datetime.datetime.now(),
-            metadata=calc.to_dict()).save()
-        search.Entry.from_calc_with_metadata(calc).save()
+            metadata=entry_metadata.m_to_dict()).save()
+        search.create_entry(entry_metadata).save()
         search.refresh()
 
     def test_delete_dataset(self, api, test_user_auth, example_dataset_with_entry):
@@ -1818,12 +1829,12 @@ class TestDataset:
         assert rv.status_code == 400
 
     def test_assign_doi_unpublished(self, api, test_user_auth, example_datasets):
-        calc = CalcWithMetadata(
+        entry_metadata = EntryMetadata(
             domain='dft', calc_id='1', upload_id='1', published=False, with_embargo=False,
             datasets=['1'])
         Calc(
             calc_id='1', upload_id='1', create_time=datetime.datetime.now(),
-            metadata=calc.to_dict()).save()
+            metadata=entry_metadata.m_to_dict()).save()
         rv = api.post('/datasets/ds1', headers=test_user_auth)
         assert rv.status_code == 400
 
diff --git a/tests/app/test_optimade.py b/tests/app/test_optimade.py
index f63a7d7bd0..c531bbf350 100644
--- a/tests/app/test_optimade.py
+++ b/tests/app/test_optimade.py
@@ -36,10 +36,10 @@ def test_get_entry(published: Upload):
         data = json.load(f)
     assert 'OptimadeEntry' in data
     search_result = search.SearchRequest().search_parameter('calc_id', calc_id).execute_paginated()['results'][0]
-    assert 'dft.optimade' in search.flat(search_result)
+    assert 'dft.optimade.chemical_formula_hill' in search.flat(search_result)
 
 
-def test_no_optimade(meta_info, elastic, api):
+def test_no_optimade(meta_info, mongo, elastic, api):
     create_test_structure(meta_info, 1, 2, 1, [], 0)
     create_test_structure(meta_info, 2, 2, 1, [], 0, optimade=False)
     search.refresh()
diff --git a/tests/bravado_flask.py b/tests/bravado_flask.py
index 2616a1bfe7..35c83c279a 100644
--- a/tests/bravado_flask.py
+++ b/tests/bravado_flask.py
@@ -25,7 +25,7 @@ class FlaskTestHttpClient(HttpClient):
         self._headers = headers
 
     def request(self, request_params, *args, **kwargs):
-        """
+        '''
         Taken from `bravado.http_client.HttpClient`.
 
         Args:
@@ -40,7 +40,7 @@ class FlaskTestHttpClient(HttpClient):
                 `bravado.http_future.HttpFuture`.
         Returns:
             `bravado_core.http_future.HttpFuture`: HTTP Future object
-        """
+        '''
         request_params.setdefault('headers', {}).update(self._headers)
         test_future = FlaskTestFutureAdapter(request_params, self._flask_client)
 
@@ -48,7 +48,7 @@ class FlaskTestHttpClient(HttpClient):
 
 
 class FlaskTestFutureAdapter:
-    """
+    '''
     Mimics a :class:`concurrent.futures.Future` for the purposes of making it work with
     Bravado's :class:`bravado.http_future.HttpFuture` when simulating calls to a Flask API.
     Those calls will be validated by Bravado.
@@ -59,7 +59,7 @@ class FlaskTestFutureAdapter:
         flask_client: Flask test client to send the request to.
         response_encoding (str): Encoding that will be used to decode response's body.
             If set to None then the body won't be decoded.
-    """
+    '''
 
     def __init__(self, request_params, flask_client, response_encoding='utf-8'):
         self._flask_client = flask_client
@@ -70,10 +70,10 @@ class FlaskTestFutureAdapter:
         self.connection_errors = None
 
     def result(self, **_):
-        """
+        '''
         Args:
             **_: Ignore all the keyword arguments (right now it's just timeout) passed by Bravado.
-        """
+        '''
         # Bravado will create the URL by appending request path to 'http://localhost'
         path = self._request_params['url'].replace('http://localhost', '')
         method = self._request_params.get('method')
@@ -100,54 +100,54 @@ class FlaskTestFutureAdapter:
 
 
 class FlaskTestResponseAdapter(IncomingResponse):
-    """
+    '''
     Wraps a response from the Flask test client to provide a uniform interface
     expected by Bravado's :class:`bravado.http_future.HttpFuture`.
     Args:
         flask_response: Response to a call simulated with flask's test client.
-    """
+    '''
 
     def __init__(self, flask_response):
         self._response = flask_response
 
     @property
     def status_code(self):
-        """
+        '''
         Returns:
             int: HTTP status code
-        """
+        '''
         return self._response.status_code
 
     @property
     def text(self):
-        """
+        '''
         Returns:
             str: Textual representation of the response's body.
-        """
+        '''
         return self._response.data
 
     @property
     def reason(self):
-        """
+        '''
         Returns:
             str: Reason-phrase of the HTTP response (e.g. "OK", or "Not Found")
-        """
+        '''
         # status codes from Flask look like this: "200 OK"
         return self._response.status[4:]
 
     @property
     def headers(self):
-        """
+        '''
         Returns:
             dict: Headers attached to the response.
-        """
+        '''
         return self._response.headers
 
     def json(self, **kwargs):
-        """
+        '''
         Args:
             **kwargs: This is a part of the interface, but we don't do anything with it.
         Returns:
             dict: JSON representation of the response's body.
-        """
+        '''
         return json.loads(self._response.data)
diff --git a/tests/conftest.py b/tests/conftest.py
index 9584b3dd8e..ff8713b2c8 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -35,7 +35,7 @@ from nomadcore.local_meta_info import loadJsonFile
 import nomad_meta_info
 
 from nomad import config, infrastructure, parsing, processing, app, search, utils
-from nomad.datamodel import User, CalcWithMetadata
+from nomad.datamodel import User, EntryMetadata
 from nomad.parsing import LocalBackend
 
 from tests import test_parsing, test_normalizing
@@ -77,7 +77,7 @@ def raw_files_infra():
 
 @pytest.fixture(scope='function')
 def raw_files(raw_files_infra):
-    """ Provides cleaned out files directory structure per function. Clears files after test. """
+    ''' Provides a cleaned-out files directory structure per function. Clears files after test. '''
     directories = [config.fs.staging, config.fs.public, config.fs.tmp]
     for directory in directories:
         if not os.path.exists(directory):
@@ -123,10 +123,10 @@ def celery_config():
 
 @pytest.fixture(scope='session')
 def purged_app(celery_session_app):
-    """
+    '''
     Purges all pending tasks of the celery app before test. This is necessary to
     remove tasks from the queue that might be 'left over' from prior tests.
-    """
+    '''
     celery_session_app.control.purge()
     yield celery_session_app
 
@@ -140,7 +140,7 @@ def celery_inspect(purged_app):
 # 'bleeding' into successive tests.
 @pytest.fixture(scope='function')
 def worker(mongo, celery_session_worker, celery_inspect):
-    """ Provides a clean worker (no old tasks) per function. Waits for all tasks to be completed. """
+    ''' Provides a clean worker (no old tasks) per function. Waits for all tasks to be completed. '''
     yield
 
     # wait until there are no more active tasks, to leave a clean worker and queues for the next
@@ -164,7 +164,7 @@ def mongo_infra(monkeysession):
 
 @pytest.fixture(scope='function')
 def mongo(mongo_infra):
-    """ Provides a cleaned mocked mongo per function. """
+    ''' Provides a cleaned mocked mongo per function. '''
     # Some test cases need to reset the database connection
     if infrastructure.mongo_client != mongo_infra:
         mongo_infra = infrastructure.mongo_client
@@ -174,7 +174,7 @@ def mongo(mongo_infra):
 
 @pytest.fixture(scope='session')
 def elastic_infra(monkeysession):
-    """ Provides elastic infrastructure to the session """
+    ''' Provides elastic infrastructure to the session '''
     monkeysession.setattr('nomad.config.elastic.index_name', 'nomad_fairdi_test')
     try:
         return infrastructure.setup_elastic()
@@ -199,7 +199,7 @@ def clear_elastic(elastic):
 
 @pytest.fixture(scope='function')
 def elastic(elastic_infra):
-    """ Provides a clean elastic per function. Clears elastic before test. """
+    ''' Provides a clean elastic per function. Clears elastic before test. '''
     clear_elastic(elastic_infra)
 
     assert infrastructure.elastic_client is not None
@@ -280,7 +280,7 @@ def keycloak(monkeypatch):
 
 @pytest.fixture(scope='function')
 def proc_infra(worker, elastic, mongo, raw_files):
-    """ Combines all fixtures necessary for processing (elastic, worker, files, mongo) """
+    ''' Combines all fixtures necessary for processing (elastic, worker, files, mongo) '''
     return dict(elastic=elastic)
 
 
@@ -384,10 +384,10 @@ def with_warn(caplog):
     assert count > 0
 
 
-"""
+'''
 Fixture for mocked SMTP server for testing.
 Based on https://gist.github.com/akheron/cf3863cdc424f08929e4cb7dc365ef23.
-"""
+'''
 
 RecordedMessage = namedtuple(
     'RecordedMessage',
@@ -527,31 +527,38 @@ def example_user_metadata(other_test_user, test_user) -> dict:
     }
 
 
+@pytest.fixture(scope='module')
+def internal_example_user_metadata(example_user_metadata) -> dict:
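+    # Strips the API-style leading underscore from keys, e.g. '_uploader'
+    # becomes 'uploader'; keys without an underscore are kept as-is.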
+    return {
+        key[1:] if key[0] == '_' else key: value
+        for key, value in example_user_metadata.items()}
+
+
 @pytest.fixture(scope='session')
 def parsed(example_mainfile: Tuple[str, str]) -> parsing.LocalBackend:
-    """ Provides a parsed calculation in the form of a LocalBackend. """
+    ''' Provides a parsed calculation in the form of a LocalBackend. '''
     parser, mainfile = example_mainfile
     return test_parsing.run_parser(parser, mainfile)
 
 
 @pytest.fixture(scope='session')
 def parsed_ems() -> parsing.LocalBackend:
-    """ Provides a parsed experiment in the form of a LocalBackend. """
+    ''' Provides a parsed experiment in the form of a LocalBackend. '''
     return test_parsing.run_parser('parsers/skeleton', 'tests/data/parsers/skeleton/example.metadata.json')
 
 
 @pytest.fixture(scope='session')
 def normalized(parsed: parsing.LocalBackend) -> parsing.LocalBackend:
-    """ Provides a normalized calculation in the form of a LocalBackend. """
+    ''' Provides a normalized calculation in the form of a LocalBackend. '''
     return test_normalizing.run_normalize(parsed)
 
 
 @pytest.fixture(scope='function')
 def uploaded(example_upload: str, raw_files) -> Tuple[str, str]:
-    """
+    '''
     Provides an upload with an uploaded example file and gives the upload_id.
     Clears files after test.
-    """
+    '''
     example_upload_id = os.path.basename(example_upload).replace('.zip', '')
     return example_upload_id, example_upload
 
@@ -565,9 +572,9 @@ def non_empty_uploaded(non_empty_example_upload: str, raw_files) -> Tuple[str, s
 @pytest.mark.timeout(config.tests.default_timeout)
 @pytest.fixture(scope='function')
 def processed(uploaded: Tuple[str, str], test_user: User, proc_infra) -> processing.Upload:
-    """
+    '''
     Provides a processed upload. Upload was uploaded with test_user.
-    """
+    '''
     return test_processing.run_processing(uploaded, test_user)
 
 
@@ -586,19 +593,19 @@ def processeds(non_empty_example_upload: str, test_user: User, proc_infra) -> Li
 @pytest.mark.timeout(config.tests.default_timeout)
 @pytest.fixture(scope='function')
 def non_empty_processed(non_empty_uploaded: Tuple[str, str], test_user: User, proc_infra) -> processing.Upload:
-    """
+    '''
     Provides a processed upload. Upload was uploaded with test_user.
-    """
+    '''
     return test_processing.run_processing(non_empty_uploaded, test_user)
 
 
 @pytest.mark.timeout(config.tests.default_timeout)
 @pytest.fixture(scope='function')
-def published(non_empty_processed: processing.Upload, example_user_metadata) -> processing.Upload:
-    """
+def published(non_empty_processed: processing.Upload, internal_example_user_metadata) -> processing.Upload:
+    '''
     Provides a published upload. Upload was uploaded with test_user.
-    """
-    non_empty_processed.compress_and_set_metadata(example_user_metadata)
+    '''
+    non_empty_processed.compress_and_set_metadata(internal_example_user_metadata)
     non_empty_processed.publish_upload()
     try:
         non_empty_processed.block_until_complete(interval=.01)
@@ -611,9 +618,9 @@ def published(non_empty_processed: processing.Upload, example_user_metadata) ->
 @pytest.mark.timeout(config.tests.default_timeout)
 @pytest.fixture(scope='function')
 def published_wo_user_metadata(non_empty_processed: processing.Upload) -> processing.Upload:
-    """
+    '''
     Provides a published upload without user metadata. Upload was uploaded with test_user.
-    """
+    '''
     non_empty_processed.publish_upload()
     try:
         non_empty_processed.block_until_complete(interval=.01)
@@ -625,7 +632,7 @@ def published_wo_user_metadata(non_empty_processed: processing.Upload) -> proces
 
 @pytest.fixture
 def reset_config():
-    """ Fixture that resets configuration. """
+    ''' Fixture that resets configuration. '''
     service = config.service
     log_level = config.console_log_level
     yield None
@@ -636,14 +643,14 @@ def reset_config():
 
 @pytest.fixture
 def reset_infra(mongo, elastic):
-    """ Fixture that resets infrastructure after deleting db or search index. """
+    ''' Fixture that resets infrastructure after deleting db or search index. '''
     yield None
 
 
 def create_test_structure(
         meta_info, id: int, h: int, o: int, extra: List[str], periodicity: int,
         optimade: bool = True, metadata: dict = None):
-    """ Creates a calculation in Elastic and Mongodb with the given properties.
+    ''' Creates a calculation in Elastic and Mongodb with the given properties.
 
     Does require initialized :func:`elastic_infra` and :func:`mongo_infra`.
 
@@ -656,7 +663,7 @@ def create_test_structure(
         periodicity: The number of dimensions to repeat the structure in
         optimade: A boolean. Iff true the entry will have optimade metadata. Default is True.
         metadata: Additional (user) metadata.
-    """
+    '''
 
     atom_labels = ['H' for i in range(0, h)] + ['O' for i in range(0, o)] + extra
     test_vector = np.array([0, 0, 0])
@@ -679,19 +686,19 @@ def create_test_structure(
     backend.closeSection('section_run', 0)
 
     backend = run_normalize(backend)
-    calc = CalcWithMetadata(
+    calc = EntryMetadata(
         domain='dft', upload_id='test_uload_id', calc_id='test_calc_id_%d' % id,
         mainfile='test_mainfile', published=True, with_embargo=False)
     calc.apply_domain_metadata(backend)
     if metadata is not None:
-        calc.update(**metadata)
+        calc.m_update(**metadata)
 
     if not optimade:
-        calc.optimade = None  # type: ignore
+        calc.dft.optimade = None
 
-    proc_calc = processing.Calc.from_calc_with_metadata(calc)
+    proc_calc = processing.Calc.from_entry_metadata(calc)
     proc_calc.save()
-    search_entry = search.Entry.from_calc_with_metadata(calc)
+    search_entry = search.create_entry(calc)
     search_entry.save()
 
     assert processing.Calc.objects(calc_id__in=[calc.calc_id]).count() == 1
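
For orientation, the old CalcWithMetadata idioms map onto the new EntryMetadata API as used above. A sketch restricted to calls that appear in this patch; the concrete ids and values are made up:

    from nomad import datamodel

    calc = datamodel.EntryMetadata(
        domain='dft', upload_id='some_upload_id', calc_id='some_calc_id',
        mainfile='some/mainfile', published=True, with_embargo=False)
    calc.m_update(comment='replaces the old update(**kwargs)')
    dft = calc.m_create(datamodel.DFTMetadata)  # domain data now lives in a sub-section
    dft.code_name = 'VASP'
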
diff --git a/tests/data/parsers/octopus/stdout.txt b/tests/data/parsers/octopus/stdout.txt
index 2b43895be9..94b5baabf3 100644
--- a/tests/data/parsers/octopus/stdout.txt
+++ b/tests/data/parsers/octopus/stdout.txt
@@ -7,7 +7,7 @@
                    _.._     |0) ~ (0) |    _.---'`__.-( (_.
             __.--'`_.. '.__.\    '--. \_.-' ,.--'`     `""`
            ( ,.--'`   ',__ /./;   ;, '.__.'`    __
-           _`) )  .---.__.' / |   |\   \__..--""  """--.,_
+           _`) )  .---.__.' / |   |\   \__..--""  '''--.,_
           `---' .'.''-._.-'`_./  /\ '.  \ _.-~~~````~~~-._`-.__.'
                 | |  .' _.-' |  |  \  \  '.               `~---`
                  \ \/ .'     \  \   '. '-._)
diff --git a/tests/processing/test_data.py b/tests/processing/test_data.py
index 537aa2c847..2a2f401d14 100644
--- a/tests/processing/test_data.py
+++ b/tests/processing/test_data.py
@@ -129,9 +129,9 @@ def test_processing_with_large_dir(test_user, proc_infra):
         assert len(calc.warnings) == 1
 
 
-def test_publish(non_empty_processed: Upload, no_warn, example_user_metadata, monkeypatch):
+def test_publish(non_empty_processed: Upload, no_warn, internal_example_user_metadata, monkeypatch):
     processed = non_empty_processed
-    processed.compress_and_set_metadata(example_user_metadata)
+    processed.compress_and_set_metadata(internal_example_user_metadata)
 
     additional_keys = ['with_embargo']
 
@@ -141,17 +141,17 @@ def test_publish(non_empty_processed: Upload, no_warn, example_user_metadata, mo
     except Exception:
         pass
 
-    upload = processed.to_upload_with_metadata(example_user_metadata)
+    entries = processed.entries_metadata(internal_example_user_metadata)
 
-    assert_upload_files(upload, PublicUploadFiles, published=True)
-    assert_search_upload(upload, additional_keys, published=True)
+    assert_upload_files(processed.upload_id, entries, PublicUploadFiles, published=True)
+    assert_search_upload(entries, additional_keys, published=True)
 
-    assert_processing(Upload.get(upload.upload_id, include_published=True), published=True)
+    assert_processing(Upload.get(processed.upload_id, include_published=True), published=True)
 
 
-def test_republish(non_empty_processed: Upload, no_warn, example_user_metadata, monkeypatch):
+def test_republish(non_empty_processed: Upload, no_warn, internal_example_user_metadata, monkeypatch):
     processed = non_empty_processed
-    processed.compress_and_set_metadata(example_user_metadata)
+    processed.compress_and_set_metadata(internal_example_user_metadata)
 
     additional_keys = ['with_embargo']
 
@@ -162,20 +162,20 @@ def test_republish(non_empty_processed: Upload, no_warn, example_user_metadata,
     processed.publish_upload()
     processed.block_until_complete(interval=.01)
 
-    upload = processed.to_upload_with_metadata(example_user_metadata)
+    entries = processed.entries_metadata(internal_example_user_metadata)
 
-    assert_upload_files(upload, PublicUploadFiles, published=True)
-    assert_search_upload(upload, additional_keys, published=True)
+    assert_upload_files(processed.upload_id, entries, PublicUploadFiles, published=True)
+    assert_search_upload(entries, additional_keys, published=True)
 
 
 def test_publish_failed(
-        non_empty_uploaded: Tuple[str, str], example_user_metadata, test_user,
+        non_empty_uploaded: Tuple[str, str], internal_example_user_metadata, test_user,
         monkeypatch, proc_infra):
 
     mock_failure(Calc, 'parsing', monkeypatch)
 
     processed = run_processing(non_empty_uploaded, test_user)
-    processed.compress_and_set_metadata(example_user_metadata)
+    processed.compress_and_set_metadata(internal_example_user_metadata)
 
     additional_keys = ['with_embargo']
 
@@ -185,9 +185,9 @@ def test_publish_failed(
     except Exception:
         pass
 
-    upload = processed.to_upload_with_metadata(example_user_metadata)
+    entries = processed.entries_metadata(internal_example_user_metadata)
 
-    assert_search_upload(upload, additional_keys, published=True, processed=False)
+    assert_search_upload(entries, additional_keys, published=True, processed=False)
 
 
 @pytest.mark.timeout(config.tests.default_timeout)
@@ -211,7 +211,7 @@ def test_process_non_existing(proc_infra, test_user, with_error):
 
 @pytest.mark.timeout(config.tests.default_timeout)
 @pytest.mark.parametrize('with_failure', [None, 'before', 'after', 'not-matched'])
-def test_re_processing(published: Upload, example_user_metadata, monkeypatch, with_failure):
+def test_re_processing(published: Upload, internal_example_user_metadata, monkeypatch, with_failure):
     if with_failure == 'not-matched':
         monkeypatch.setattr('nomad.config.reprocess_unmatched', False)
 
@@ -249,7 +249,7 @@ def test_re_processing(published: Upload, example_user_metadata, monkeypatch, wi
     shutil.copyfile(
         raw_files, published.upload_files.join_file('raw-restricted.plain.zip').os_path)
 
-    upload = published.to_upload_with_metadata(example_user_metadata)
+    entries = published.entries_metadata(internal_example_user_metadata)
 
     # reprocess
     monkeypatch.setattr('nomad.config.version', 're_process_test_version')
@@ -292,10 +292,10 @@ def test_re_processing(published: Upload, example_user_metadata, monkeypatch, wi
         assert old_log_lines != new_log_lines
 
     # assert maintained user metadata (mongo+es)
-    assert_upload_files(upload, PublicUploadFiles, published=True)
-    assert_search_upload(upload, published=True)
+    assert_upload_files(published.upload_id, entries, PublicUploadFiles, published=True)
+    assert_search_upload(entries, published=True)
     if with_failure not in ['after', 'not-matched']:
-        assert_processing(Upload.get(upload.upload_id, include_published=True), published=True)
+        assert_processing(Upload.get(published.upload_id, include_published=True), published=True)
 
     # assert changed calc metadata (mongo)
     if with_failure not in ['after', 'not-matched']:
@@ -306,7 +306,7 @@ def test_re_processing(published: Upload, example_user_metadata, monkeypatch, wi
 
 @pytest.mark.timeout(config.tests.default_timeout)
 @pytest.mark.parametrize('with_failure', [None, 'before', 'after'])
-def test_re_pack(published: Upload, example_user_metadata, monkeypatch, with_failure):
+def test_re_pack(published: Upload, monkeypatch, with_failure):
     upload_id = published.upload_id
     calc = Calc.objects(upload_id=upload_id).first()
     assert calc.metadata['with_embargo']
@@ -403,6 +403,6 @@ def test_ems_data(proc_infra, test_user):
     assert upload.total_calcs == 1
     assert len(upload.calcs) == 1
 
-    upload_with_metadata = upload.to_upload_with_metadata()
-    assert_upload_files(upload_with_metadata, StagingUploadFiles, published=False)
-    assert_search_upload(upload_with_metadata, additional_keys, published=False)
+    entries = upload.entries_metadata()
+    assert_upload_files(upload.upload_id, entries, StagingUploadFiles, published=False)
+    assert_search_upload(entries, additional_keys, published=False)
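
Taken together, these hunks replace the UploadWithMetadata round trip with a plain iterable of EntryMetadata. The resulting assertion pattern, consolidated from the hunks above for a processing.Upload instance named upload:

    entries = upload.entries_metadata()  # was: upload.to_upload_with_metadata()
    assert_upload_files(upload.upload_id, entries, StagingUploadFiles, published=False)
    assert_search_upload(entries, ['with_embargo'], published=False)
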
diff --git a/tests/test_client.py b/tests/test_client.py
index c2ee35226d..354bca0f0c 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -15,7 +15,7 @@
 import time
 
 from nomad.processing import SUCCESS
-from nomad.datamodel import CalcWithMetadata
+from nomad.datamodel import EntryMetadata
 
 from tests.test_files import example_file
 from tests.test_search import create_entry
@@ -37,8 +37,8 @@ def test_upload(bravado, proc_infra, no_warn):
 
 
 def test_get_repo_calc(bravado, proc_infra, raw_files):
-    create_entry(CalcWithMetadata(
-        domain='dft', calc_id=0, upload_id='test_upload', published=True, with_embargo=False))
+    create_entry(EntryMetadata(
+        domain='dft', calc_id='0', upload_id='test_upload', published=True, with_embargo=False))
     repo = bravado.repo.get_repo_calc(upload_id='test_upload', calc_id='0').response().result
     assert repo is not None
     assert repo['calc_id'] is not None
diff --git a/tests/test_datamodel.py b/tests/test_datamodel.py
index f00ea36b5b..2125b99d89 100644
--- a/tests/test_datamodel.py
+++ b/tests/test_datamodel.py
@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-"""
+'''
 A generator for random test calculations.
-"""
+'''
 
 import random
 from essential_generators import DocumentGenerator
@@ -65,49 +65,50 @@ def _gen_ref():
     return random.choice(references)
 
 
-def generate_calc(pid: int = 0, calc_id: str = None, upload_id: str = None) -> datamodel.CalcWithMetadata:
+def generate_calc(pid: int = 0, calc_id: str = None, upload_id: str = None) -> datamodel.EntryMetadata:
     random.seed(pid)
 
-    self = datamodel.DFTCalcWithMetadata()
-
-    self.upload_id = upload_id if upload_id is not None else utils.create_uuid()
-    self.calc_id = calc_id if calc_id is not None else utils.create_uuid()
-
-    self.upload_time = datetime.datetime.utcnow()
-    self.calc_hash = utils.create_uuid()
-    self.pid = pid
-    self.mainfile = random.choice(filepaths)
-    self.files = list([self.mainfile] + random.choices(filepaths, k=random.choice(low_numbers_for_files)))
-    self.uploader = _gen_user()
-
-    self.with_embargo = random.choice([True, False])
-    self.published = True
-    self.coauthors = list(_gen_user() for _ in range(0, random.choice(low_numbers_for_refs_and_datasets)))
-    self.shared_with = list(_gen_user() for _ in range(0, random.choice(low_numbers_for_refs_and_datasets)))
-    self.comment = random.choice(comments)
-    self.references = list(_gen_ref() for _ in range(0, random.choice(low_numbers_for_refs_and_datasets)))
-    self.datasets = list(
+    entry = datamodel.EntryMetadata()
+
+    entry.upload_id = upload_id if upload_id is not None else utils.create_uuid()
+    entry.calc_id = calc_id if calc_id is not None else utils.create_uuid()
+
+    entry.upload_time = datetime.datetime.utcnow()
+    entry.calc_hash = utils.create_uuid()
+    entry.pid = pid
+    entry.mainfile = random.choice(filepaths)
+    entry.files = list([entry.mainfile] + random.choices(filepaths, k=random.choice(low_numbers_for_files)))
+    entry.uploader = _gen_user()
+
+    entry.with_embargo = random.choice([True, False])
+    entry.published = True
+    entry.coauthors = list(_gen_user() for _ in range(0, random.choice(low_numbers_for_refs_and_datasets)))
+    entry.shared_with = list(_gen_user() for _ in range(0, random.choice(low_numbers_for_refs_and_datasets)))
+    entry.comment = random.choice(comments)
+    entry.references = list(_gen_ref() for _ in range(0, random.choice(low_numbers_for_refs_and_datasets)))
+    entry.datasets = list(
         _gen_dataset()
         for _ in range(0, random.choice(low_numbers_for_refs_and_datasets)))
 
-    self.atoms = list(random.choices(chemical_symbols[1:], k=random.choice(low_numbers_for_atoms)))
-    self.formula = ''.join('%s%d' % (atom, random.choice(low_numbers_for_atoms)) for atom in self.atoms)
-    self.formula = self.formula.replace('1', '')
+    entry.atoms = list(random.choices(chemical_symbols[1:], k=random.choice(low_numbers_for_atoms)))
+    entry.formula = ''.join('%s%d' % (atom, random.choice(low_numbers_for_atoms)) for atom in entry.atoms)
+    entry.formula = entry.formula.replace('1', '')
 
-    self.basis_set = random.choice(basis_sets)
-    self.xc_functional = random.choice(xc_functionals)
-    self.system = random.choice(systems)
-    self.crystal_system = random.choice(crystal_systems)
+    dft_metadata = entry.m_create(datamodel.DFTMetadata)
+    dft_metadata.basis_set = random.choice(basis_sets)
+    dft_metadata.xc_functional = random.choice(xc_functionals)
+    dft_metadata.system = random.choice(systems)
+    dft_metadata.crystal_system = random.choice(crystal_systems)
     spacegroup = random.randint(1, 225)
-    self.spacegroup = str(spacegroup)
-    self.spacegroup_symbol = Spacegroup(spacegroup).symbol
-    self.code_name = random.choice(codes)
-    self.code_version = '1.0.0'
+    dft_metadata.spacegroup = str(spacegroup)
+    dft_metadata.spacegroup_symbol = Spacegroup(spacegroup).symbol
+    dft_metadata.code_name = random.choice(codes)
+    dft_metadata.code_version = '1.0.0'
 
-    self.n_total_energies = random.choice(range(0, 5))
-    self.geometries = ['%d' % random.randint(1, 500), '%d' % random.randint(1, 500)]
+    dft_metadata.n_total_energies = random.choice(range(0, 5))
+    dft_metadata.geometries = ['%d' % random.randint(1, 500), '%d' % random.randint(1, 500)]
 
-    return self
+    return entry
 
 
 if __name__ == '__main__':
@@ -130,7 +131,6 @@ if __name__ == '__main__':
 
     for calcs_per_upload in utils.chunks(range(0, n_calcs), int(n_calcs / n_uploads)):
         upload_id = utils.create_uuid()
-        upload = datamodel.UploadWithMetadata(upload_id=upload_id)
         upload_files = files.StagingUploadFiles(
             upload_id=upload_id, create=True, is_authorized=lambda: True)
 
@@ -150,7 +150,7 @@ if __name__ == '__main__':
             with upload_files.archive_log_file(calc.calc_id, 'wt') as f:
                 f.write('this is a generated test file')
 
-            search_entry = search.Entry.from_calc_with_metadata(calc)
+            search_entry = search.Entry.from_entry_metadata(calc)
             search_entry.n_total_energies = random.choice(low_numbers_for_total_energies)
             search_entry.n_geometries = low_numbers_for_geometries
             for _ in range(0, random.choice(search_entry.n_geometries)):
@@ -160,11 +160,9 @@ if __name__ == '__main__':
             pid += 1
             calcs.append(calc)
 
-        upload.calcs = calcs
-
         bulk(
             infrastructure.elastic_client,
             [entry.to_dict(include_meta=True) for entry in search_entries])
 
-        upload_files.pack(upload)
+        upload_files.pack(calcs)
         upload_files.delete()
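
Note that StagingUploadFiles.pack() now takes the entries directly instead of an UploadWithMetadata wrapper. A sketch under that assumption, with calcs being a list of datamodel.EntryMetadata as built above:

    upload_files = files.StagingUploadFiles(
        upload_id, create=True, is_authorized=lambda: True)
    # ... write raw and archive files for each calc ...
    upload_files.pack(calcs)
    upload_files.delete()
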
diff --git a/tests/test_files.py b/tests/test_files.py
index 62d071f4d8..ce9d0cffa8 100644
--- a/tests/test_files.py
+++ b/tests/test_files.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Generator, Any, Dict, Tuple
+from typing import Generator, Any, Dict, Tuple, Iterable
 import os
 import os.path
 import shutil
@@ -22,8 +22,7 @@ import itertools
 import zipfile
 import re
 
-from nomad import config
-from nomad.datamodel import UploadWithMetadata, CalcWithMetadata
+from nomad import config, datamodel
 from nomad.files import DirectoryObject, PathObject
 from nomad.files import StagingUploadFiles, PublicUploadFiles, UploadFiles, Restricted, \
     ArchiveBasedStagingUploadFiles
@@ -31,10 +30,10 @@ from nomad.files import StagingUploadFiles, PublicUploadFiles, UploadFiles, Rest
 from tests.utils import assert_exception
 
 
-CalcWithFiles = Tuple[CalcWithMetadata, str]
-UploadWithFiles = Tuple[UploadWithMetadata, UploadFiles]
-StagingUploadWithFiles = Tuple[UploadWithMetadata, StagingUploadFiles]
-PublicUploadWithFiles = Tuple[UploadWithMetadata, PublicUploadFiles]
+CalcWithFiles = Tuple[datamodel.EntryMetadata, str]
+UploadWithFiles = Tuple[str, Iterable[datamodel.EntryMetadata], UploadFiles]
+StagingUploadWithFiles = Tuple[str, Iterable[datamodel.EntryMetadata], StagingUploadFiles]
+PublicUploadWithFiles = Tuple[str, Iterable[datamodel.EntryMetadata], PublicUploadFiles]
 
 # example_file uses an artificial parser for faster test execution; it can also be
 # changed to examples_vasp.zip to use the vasp parser
@@ -56,7 +55,7 @@ example_data = dict(test_key='test_value')
 
 @pytest.fixture(scope='function', autouse=True)
 def raw_files_on_all_tests(raw_files):
-    """ Autouse fixture to apply raw_files to all tests. """
+    ''' Autouse fixture to apply raw_files to all tests. '''
     pass
 
 
@@ -125,9 +124,9 @@ example_calc_id = example_calc['calc_id']
 def generate_example_calc(
         calc_id: int, with_mainfile_prefix: bool, subdirectory: str = None,
         **kwargs) -> CalcWithFiles:
-    """ Generate an example calc with :class:`CalcWithMetadata` and rawfile. """
+    ''' Generate an example calc with :class:`EntryMetadata` and rawfile. '''
 
-    example_calc = CalcWithMetadata(domain='dft', calc_id=str(calc_id))
+    example_calc = datamodel.EntryMetadata(domain='dft', calc_id=str(calc_id))
 
     if with_mainfile_prefix:
         mainfile = '%d.template.json' % calc_id
@@ -138,7 +137,7 @@ def generate_example_calc(
         mainfile = os.path.join(subdirectory, mainfile)
 
     example_calc.mainfile = mainfile
-    example_calc.update(**kwargs)
+    example_calc.m_update(**kwargs)
 
     example_file = os.path.join(config.fs.tmp, 'example.zip')
     example_calc.files = []
@@ -209,8 +208,8 @@ class UploadFilesContract(UploadFilesFixtures):
         assert UploadFiles.get(empty_test_upload.upload_id).__class__ == empty_test_upload.__class__
 
     def test_rawfile(self, test_upload: UploadWithFiles):
-        upload, upload_files = test_upload
-        for calc in upload.calcs:
+        _, entries, upload_files = test_upload
+        for calc in entries:
             try:
                 for file_path in calc.files:
                     with upload_files.raw_file(file_path) as f:
@@ -222,8 +221,8 @@ class UploadFilesContract(UploadFilesFixtures):
                 assert calc.with_embargo
 
     def test_rawfile_size(self, test_upload: UploadWithFiles):
-        upload, upload_files = test_upload
-        for calc in upload.calcs:
+        _, entries, upload_files = test_upload
+        for calc in entries:
             try:
                 for file_path in calc.files:
                     assert upload_files.raw_file_size(file_path) > 0
@@ -235,13 +234,13 @@ class UploadFilesContract(UploadFilesFixtures):
 
     @pytest.mark.parametrize('prefix', [None, 'examples'])
     def test_raw_file_manifest(self, test_upload: UploadWithFiles, prefix: str):
-        _, upload_files = test_upload
+        _, _, upload_files = test_upload
         raw_files = list(upload_files.raw_file_manifest(path_prefix=prefix))
         assert_example_files(raw_files)
 
     @pytest.mark.parametrize('prefix', [None, 'examples_template'])
     def test_raw_file_list(self, test_upload: UploadWithFiles, prefix: str):
-        _, upload_files = test_upload
+        _, _, upload_files = test_upload
         raw_files = list(upload_files.raw_file_list(directory=prefix))
         if prefix is None:
             assert len(raw_files) == 0
@@ -256,8 +255,8 @@ class UploadFilesContract(UploadFilesFixtures):
 
     @pytest.mark.parametrize('test_logs', [True, False])
     def test_archive(self, test_upload: UploadWithFiles, test_logs: bool):
-        upload, upload_files = test_upload
-        calcs = upload.calcs_dict
+        _, entries, upload_files = test_upload
+        calcs_dict = {entry.calc_id: entry for entry in entries}
         try:
             if test_logs:
                 with upload_files.archive_log_file(example_calc_id, 'rt') as f:
@@ -267,26 +266,26 @@ class UploadFilesContract(UploadFilesFixtures):
                 assert json.load(f) == json.loads(example_archive_contents)
 
             if not upload_files._is_authorized():
-                assert not calcs.get(example_calc_id).with_embargo
+                assert not calcs_dict.get(example_calc_id).with_embargo
         except Restricted:
             assert not upload_files._is_authorized()
-            assert calcs.get(example_calc_id).with_embargo
+            assert calcs_dict.get(example_calc_id).with_embargo
 
     def test_archive_size(self, test_upload: UploadWithFiles):
-        upload, upload_files = test_upload
-        calcs = upload.calcs_dict
+        _, entries, upload_files = test_upload
+        calcs_dict = {entry.calc_id: entry for entry in entries}
         try:
             assert upload_files.archive_file_size(example_calc_id) > 0
 
             if not upload_files._is_authorized():
-                assert not calcs.get(example_calc_id).with_embargo
+                assert not calcs_dict.get(example_calc_id).with_embargo
         except Restricted:
             assert not upload_files._is_authorized()
-            assert calcs.get(example_calc_id).with_embargo
+            assert calcs_dict.get(example_calc_id).with_embargo
 
 
 def create_staging_upload(upload_id: str, calc_specs: str) -> StagingUploadWithFiles:
-    """
+    '''
     Create an upload according to the given spec. Additional arguments are given to
     the StagingUploadFiles constructor.
 
@@ -297,9 +296,8 @@ def create_staging_upload(upload_id: str, calc_specs: str) -> StagingUploadWithF
             The calcs will be copies of calcs in `example_file`.
             First calc is at top level, following calcs will be put under 1/, 2/, etc.
             All calcs with capital `P`/`R` will be put in the same directory under multi/.
-    """
+    '''
     upload_files = StagingUploadFiles(upload_id, create=True, is_authorized=lambda: True)
-    upload = UploadWithMetadata(upload_id=upload_id)
     calcs = []
 
     prefix = 0
@@ -327,8 +325,7 @@ def create_staging_upload(upload_id: str, calc_specs: str) -> StagingUploadWithF
         prefix += 1
 
     assert len(calcs) == len(calc_specs)
-    upload.calcs = calcs
-    return upload, upload_files
+    return upload_id, calcs, upload_files
 
 
 class TestStagingUploadFiles(UploadFilesContract):
@@ -353,27 +350,27 @@ class TestStagingUploadFiles(UploadFilesContract):
                     assert len(content) > 0
 
     def test_write_archive(self, test_upload: StagingUploadWithFiles):
-        _, upload_files = test_upload
+        _, _, upload_files = test_upload
         assert json.load(upload_files.archive_file(example_calc_id, 'rt')) == json.loads(example_archive_contents)
 
     def test_calc_id(self, test_upload: StagingUploadWithFiles):
-        _, upload_files = test_upload
+        _, _, upload_files = test_upload
         assert upload_files.calc_id(example_file_mainfile) is not None
 
     def test_pack(self, test_upload: StagingUploadWithFiles):
-        upload, upload_files = test_upload
-        upload_files.pack(upload)
+        _, entries, upload_files = test_upload
+        upload_files.pack(entries)
 
     @pytest.mark.parametrize('with_mainfile', [True, False])
     def test_calc_files(self, test_upload: StagingUploadWithFiles, with_mainfile):
-        upload, upload_files = test_upload
-        for calc in upload.calcs:
+        _, entries, upload_files = test_upload
+        for calc in entries:
             mainfile = calc.mainfile
             calc_files = upload_files.calc_files(mainfile, with_mainfile=with_mainfile)
             assert_example_files(calc_files, with_mainfile=with_mainfile)
 
     def test_delete(self, test_upload: StagingUploadWithFiles):
-        _, upload_files = test_upload
+        _, _, upload_files = test_upload
         upload_files.delete()
         assert not upload_files.exists()
 
@@ -396,17 +393,17 @@ class TestArchiveBasedStagingUploadFiles(UploadFilesFixtures):
 def create_public_upload(
         upload_id: str, calc_specs: str, **kwargs) -> PublicUploadWithFiles:
 
-    upload, upload_files = create_staging_upload(upload_id, calc_specs)
-    upload_files.pack(upload)
+    _, entries, upload_files = create_staging_upload(upload_id, calc_specs)
+    upload_files.pack(entries)
     upload_files.delete()
-    return upload, PublicUploadFiles(upload_id, **kwargs)
+    return upload_id, entries, PublicUploadFiles(upload_id, **kwargs)
 
 
 class TestPublicUploadFiles(UploadFilesContract):
 
     @pytest.fixture(scope='function')
     def empty_test_upload(self, test_upload_id: str) -> UploadFiles:
-        _, upload_files = create_public_upload(
+        _, _, upload_files = create_public_upload(
             test_upload_id, calc_specs='', is_authorized=lambda: True)
 
         return upload_files
@@ -415,13 +412,13 @@ class TestPublicUploadFiles(UploadFilesContract):
         ['r', 'rr', 'pr', 'rp', 'p', 'pp', 'RP', 'RR', 'PP'], [True, False]))
     def test_upload(self, request, test_upload_id: str) -> PublicUploadWithFiles:
         calc_specs, protected = request.param
-        upload, upload_files = create_staging_upload(test_upload_id, calc_specs=calc_specs)
-        upload_files.pack(upload)
+        _, entries, upload_files = create_staging_upload(test_upload_id, calc_specs=calc_specs)
+        upload_files.pack(entries)
         upload_files.delete()
-        return upload, PublicUploadFiles(test_upload_id, is_authorized=lambda: not protected)
+        return test_upload_id, entries, PublicUploadFiles(test_upload_id, is_authorized=lambda: not protected)
 
     def test_to_staging_upload_files(self, test_upload):
-        upload, upload_files = test_upload
+        _, entries, upload_files = test_upload
         assert upload_files.to_staging_upload_files() is None
         staging_upload_files = upload_files.to_staging_upload_files(create=True)
         assert staging_upload_files is not None
@@ -438,7 +435,7 @@ class TestPublicUploadFiles(UploadFilesContract):
             with open(f, 'wt') as fh:
                 fh.write('')
 
-        staging_upload_files.pack(upload)
+        staging_upload_files.pack(entries)
         staging_upload_files.delete()
 
         # We do a very simple check. We made all files empty, those that are rezipped
@@ -453,19 +450,20 @@ class TestPublicUploadFiles(UploadFilesContract):
         assert upload_files.to_staging_upload_files() is None
 
     def test_repack(self, test_upload):
-        upload, upload_files = test_upload
-        for calc in upload.calcs:
+        upload_id, entries, upload_files = test_upload
+        for calc in entries:
             calc.with_embargo = False
-        upload_files.re_pack(upload)
-        assert_upload_files(upload, PublicUploadFiles, with_embargo=False)
+        upload_files.re_pack(entries)
+        assert_upload_files(upload_id, entries, PublicUploadFiles, with_embargo=False)
         assert len(os.listdir(upload_files.os_path)) == 8
         with assert_exception(KeyError):
             StagingUploadFiles(upload_files.upload_id)
 
 
 def assert_upload_files(
-        upload: UploadWithMetadata, cls, no_archive: bool = False, **kwargs):
-    """
+        upload_id: str, entries: Iterable[datamodel.EntryMetadata], cls,
+        no_archive: bool = False, **kwargs):
+    '''
     Asserts the files aspect of uploaded data after processing or publishing
 
     Arguments:
@@ -473,13 +471,13 @@ def assert_upload_files(
         cls: The :class:`UploadFiles` subclass that this upload should have
-        n_calcs: The number of expected calcs in the upload
+        entries: The expected :class:`EntryMetadata` of the upload's calcs
         **kwargs: Key, value pairs that each calc metadata should have
-    """
-    upload_files = UploadFiles.get(upload.upload_id, is_authorized=lambda: True)
+    '''
+    upload_files = UploadFiles.get(upload_id, is_authorized=lambda: True)
     assert upload_files is not None
     assert isinstance(upload_files, cls)
 
-    upload_files = UploadFiles.get(upload.upload_id)
-    for calc in upload.calcs:
+    upload_files = UploadFiles.get(upload_id)
+    for calc in entries:
         try:
             with upload_files.raw_file(calc.mainfile) as f:
                 f.read()
diff --git a/tests/test_metainfo.py b/tests/test_metainfo.py
index 32adbc749b..4bb544c277 100644
--- a/tests/test_metainfo.py
+++ b/tests/test_metainfo.py
@@ -45,7 +45,7 @@ def assert_section_instance(section: MSection):
 
 
 class TestM3:
-    """ Test for meta-info definition that are used to define other definitions. """
+    ''' Test for meta-info definitions that are used to define other definitions. '''
 
     def test_section(self):
         assert Section.m_def == Section.m_def.m_def
@@ -84,7 +84,7 @@ class TestM3:
 
 
 class TestPureReflection:
-    """ Test for using meta-info instances without knowing/using the respective definitions. """
+    ''' Test for using meta-info instances without knowing/using the respective definitions. '''
 
     def test_instantiation(self):
         test_section_def = Section(name='TestSection')
@@ -98,19 +98,19 @@ class TestPureReflection:
 
 
 class MaterialDefining(MCategory):
-    """Quantities that add to what constitutes a different material."""
+    '''Quantities that add to what constitutes a different material.'''
     pass
 
 
 class TestM2:
-    """ Test for meta-info definitions. """
+    ''' Test for meta-info definitions. '''
 
     def test_basics(self):
         assert_section_def(Run.m_def)
         assert_section_def(System.m_def)
 
     def test_default_section_def(self):
-        """ A section class without an explicit section def must set a default section def. """
+        ''' A section class without an explicit section def must set a default section def. '''
         assert Run.m_def is not None
         assert Run.m_def.name == 'Run'
 
@@ -231,9 +231,12 @@ class TestM2:
     def test_qualified_name(self):
         assert System.m_def.qualified_name() == 'nomad.metainfo.example.System'
 
+    def test_derived_virtual(self):
+        assert System.n_atoms.virtual
+
 
 class TestM1:
-    """ Test for meta-info instances. """
+    ''' Test for meta-info instances. '''
 
     def test_run(self):
         class Run(MSection):
@@ -257,6 +260,30 @@ class TestM1:
 
         assert_section_instance(system)
 
+    def test_set_none(self):
+        run = Run()
+        run.code_name = 'test'
+        assert run.code_name is not None
+
+        run.code_name = None
+        assert run.code_name is None
+
+    def test_set_subsection(self):
+        run = Run()
+        first = Parsing()
+        run.parsing = first
+        assert first.m_parent == run
+        assert run.parsing == first
+
+        second = Parsing()
+        run.parsing = second
+        assert first.m_parent is None
+        assert second.m_parent == run
+        assert run.parsing == second
+
+        run.parsing = None
+        assert run.parsing is None
+
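The two new tests above pin down the metainfo2 assignment semantics: setting a quantity to None clears it, and assigning a sub-section re-parents it, detaching any previously assigned child. In sketch form, with Run and Parsing as defined in nomad.metainfo.example:

    run = Run()
    first, second = Parsing(), Parsing()
    run.parsing = first    # first.m_parent is run
    run.parsing = second   # first is detached: first.m_parent is None
    run.parsing = None     # sub-section cleared again
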
     def test_defaults(self):
         assert len(System().periodic_dimensions) == 3
         assert System().atom_labels is None
@@ -333,6 +360,7 @@ class TestM1:
     def example_data(self):
         run = Run()
         run.code_name = 'test code name'
+        run.m_create(Parsing)
         system: System = run.m_create(System)
         system.atom_labels = ['H', 'H', 'O']
         system.atom_positions = np.array([[1.2e-10, 0, 0], [0, 1.2e-10, 0], [0, 0, 1.2e-10]])
@@ -356,6 +384,15 @@ class TestM1:
 
         self.assert_example_data(new_example_data)
 
+    def test_to_dict_defaults(self, example_data):
+        dct = example_data.m_to_dict()
+        assert 'nomad_version' not in dct['parsing']
+        assert 'n_atoms' not in dct['systems'][0]
+
+        dct = example_data.m_to_dict(include_defaults=True)
+        assert 'nomad_version' in dct['parsing']
+        assert 'n_atoms' not in dct['systems'][0]
+
     def test_derived(self):
         system = System()
 
@@ -412,6 +449,17 @@ class TestM1:
 
         assert len(resource.all(System)) == 2
 
+    def test_mapping(self):
+        run = Run()
+        run.m_create(Parsing).parser_name = 'test'
+        system = run.m_create(System)
+        system.atom_labels = ['H', 'O']
+
+        assert run.systems[0].atom_labels == ['H', 'O']
+        assert run['systems.0.atom_labels'] == ['H', 'O']
+        assert run['systems/0/atom_labels'] == ['H', 'O']
+        assert run['parsing.parser_name'] == 'test'
+
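test_mapping above exercises the new dict-like access on sections, with '.' and '/' as interchangeable path separators. A hypothetical defaulting helper built on it; the caught exception types are an assumption this patch does not confirm:

    def get_path(section, path, default=None):
        try:
            return section[path]
        except (KeyError, IndexError):  # assumed failure modes
            return default

    # given the run built in test_mapping: get_path(run, 'parsing.parser_name') == 'test'
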
 
 class TestEnvironment:
 
diff --git a/tests/test_normalizing.py b/tests/test_normalizing.py
index 9354d0aedc..8e32d6b0e7 100644
--- a/tests/test_normalizing.py
+++ b/tests/test_normalizing.py
@@ -50,36 +50,36 @@ vasp_parser_dos = (
 glucose_atom_labels = (
     'parsers/template', 'tests/data/normalizers/glucose_atom_labels.json')
 
-symmetry_keys = ['spacegroup', 'spacegroup_symbol', 'crystal_system']
+symmetry_keys = ['dft.spacegroup', 'dft.spacegroup_symbol', 'dft.crystal_system']
 calc_metadata_keys = [
-    'code_name', 'code_version', 'basis_set', 'xc_functional', 'system', 'formula'] + symmetry_keys
+    'dft.code_name', 'dft.code_version', 'dft.basis_set', 'dft.xc_functional', 'dft.system', 'formula'] + symmetry_keys
 
 parser_exceptions = {
-    'parsers/wien2k': ['xc_functional'],
+    'parsers/wien2k': ['dft.xc_functional'],
     'parsers/nwchem': symmetry_keys,
     'parsers/bigdft': symmetry_keys,
     'parsers/gaussian': symmetry_keys,
-    'parsers/abinit': ['formula', 'system'] + symmetry_keys,
-    'parsers/dl-poly': ['formula', 'basis_set', 'xc_functional', 'system'] + symmetry_keys,
-    'parsers/lib-atoms': ['basis_set', 'xc_functional'],
+    'parsers/abinit': ['formula', 'dft.system'] + symmetry_keys,
+    'parsers/dl-poly': ['formula', 'dft.basis_set', 'dft.xc_functional', 'dft.system'] + symmetry_keys,
+    'parsers/lib-atoms': ['dft.basis_set', 'dft.xc_functional'],
     'parsers/orca': symmetry_keys,
     'parsers/octopus': symmetry_keys,
-    'parsers/phonopy': ['basis_set', 'xc_functional'],
+    'parsers/phonopy': ['dft.basis_set', 'dft.xc_functional'],
     'parsers/gpaw2': symmetry_keys,
-    'parsers/gamess': ['formula', 'system'] + symmetry_keys,
-    'parsers/gulp': ['formula', 'xc_functional', 'system', 'basis_set'] + symmetry_keys,
+    'parsers/gamess': ['formula', 'dft.system', 'dft.xc_functional'] + symmetry_keys,
+    'parsers/gulp': ['formula', 'dft.xc_functional', 'dft.system', 'dft.basis_set'] + symmetry_keys,
     'parsers/turbomole': symmetry_keys,
-    'parsers/elastic': ['basis_set', 'xc_functional', 'system'] + symmetry_keys,
-    'parsers/dmol': ['system'] + symmetry_keys,
+    'parsers/elastic': ['dft.basis_set', 'dft.xc_functional', 'dft.system'] + symmetry_keys,
+    'parsers/dmol': ['dft.system'] + symmetry_keys,
     'parser/molcas': symmetry_keys,
-    'parsers/band': ['system'] + symmetry_keys,
-    'parsers/qbox': ['xc_functional'],
-    'parser/onetep': ['formula', 'basis_set', 'xc_functional', 'system'] + symmetry_keys
+    'parsers/band': ['dft.system'] + symmetry_keys,
+    'parsers/qbox': ['dft.xc_functional'],
+    'parser/onetep': ['formula', 'dft.basis_set', 'dft.xc_functional', 'dft.system'] + symmetry_keys
 }
-"""
+'''
 Keys that the normalizer for certain parsers might not produce. In an ideal world this
 map would be empty.
-"""
+'''
 
 
 def run_normalize(backend: LocalBackend) -> LocalBackend:
@@ -209,17 +209,17 @@ def test_template_example_normalizer(parsed_template_example, no_warn, caplog):
 
 
 def assert_normalized(backend: LocalBackend):
-    metadata = datamodel.DFTCalcWithMetadata()
+    metadata = datamodel.EntryMetadata(domain='dft')
     metadata.apply_domain_metadata(backend)
     assert metadata.formula is not None
-    assert metadata.code_name is not None
-    assert metadata.code_version is not None
-    assert metadata.basis_set is not None
-    assert metadata.xc_functional is not None
-    assert metadata.system is not None
-    assert metadata.crystal_system is not None
+    assert metadata.dft.code_name is not None
+    assert metadata.dft.code_version is not None
+    assert metadata.dft.basis_set is not None
+    assert metadata.dft.xc_functional is not None
+    assert metadata.dft.system is not None
+    assert metadata.dft.crystal_system is not None
     assert len(metadata.atoms) is not None
-    assert metadata.spacegroup is not None
+    assert metadata.dft.spacegroup is not None
 
     exceptions = parser_exceptions.get(backend.get_value('parser_name'), [])
 
@@ -228,7 +228,7 @@ def assert_normalized(backend: LocalBackend):
 
     for key in calc_metadata_keys:
         if key not in exceptions:
-            assert getattr(metadata, key) != config.services.unavailable_value
+            assert metadata[key] != config.services.unavailable_value
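
The 'dft.'-prefixed keys resolve through the new sub-section, so metadata[key] reaches DFTMetadata quantities directly. A minimal sketch, with made-up values:

    metadata = datamodel.EntryMetadata(domain='dft')
    metadata.m_create(datamodel.DFTMetadata).code_name = 'VASP'
    assert metadata['dft.code_name'] == 'VASP'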
 
 
 def test_normalizer(normalized_example: LocalBackend):
@@ -236,7 +236,7 @@ def test_normalizer(normalized_example: LocalBackend):
 
 
 def test_normalizer_faulty_matid(caplog):
-    """ Runs normalizer on an example w/ bools for atom pos. Should force matid error."""
+    ''' Runs normalizer on an example with booleans for atom positions. Should force a matid error.'''
     # assert isinstance(backend, LocalBackend)
     backend = parse_file(boolean_positions)
     run_normalize(backend)
@@ -245,26 +245,26 @@ def test_normalizer_faulty_matid(caplog):
 
 
 def test_normalizer_single_string_atom_labels(caplog):
-    """
+    '''
     Runs normalizer on ['Br1SiSiK'] and expects an error. Should replace the label with 'X' and
     the number of positions should not match the labels.
-    """
+    '''
     backend = parse_file(single_string_atom_labels)
     run_normalize(backend)
     assert_log(caplog, 'ERROR', 'len of atom position does not match number of atoms')
 
 
 def test_normalizer_unknown_atom_label(caplog, no_warn):
-    """ Runs normalizer on ['Br','Si','Si','Za'], for normalizeation Za will be replaced,
+    ''' Runs normalizer on ['Br','Si','Si','Za']; for normalization Za will be replaced,
         but stays in the labels.
-    """
+    '''
     backend = parse_file(unknown_atom_label)
     run_normalize(backend)
     assert backend.get_value('atom_labels')[3] == 'Za'
 
 
 def test_symmetry_classification_fcc():
-    """Runs normalizer where lattice vectors should give fcc symmetry."""
+    '''Runs normalizer where lattice vectors should give fcc symmetry.'''
     backend = parse_file(fcc_symmetry)
     backend = run_normalize(backend)
     expected_crystal_system = 'cubic'
@@ -297,9 +297,9 @@ def test_system_classification(atom, molecule, one_d, two_d, surface, bulk):
 
 
 def test_representative_systems(single_point, molecular_dynamics, geometry_optimization, phonon):
-    """Checks that the representative systems are correctly identified and
+    '''Checks that the representative systems are correctly identified and
     processed by SystemNormalizer.
-    """
+    '''
     def check_representative_frames(backend):
         # For systems with multiple frames the first and two last should be processed.
         try:
@@ -343,9 +343,9 @@ def test_reduced_chemical_formula():
 
 
 def test_vasp_incar_system():
-    """
+    '''
     Ensure we can test an incar value in the VASP example
-    """
+    '''
     backend = parse_file(vasp_parser)
     backend = run_normalize(backend)
     expected_value = 'SrTiO3'  # material's formula in vasp.xml
@@ -359,8 +359,8 @@ def test_vasp_incar_system():
 
 
 def test_aflow_prototypes():
-    """Tests that some basis structures are matched with the correct AFLOW prototypes
-    """
+    '''Tests that some basic structures are matched with the correct AFLOW prototypes.
+    '''
     # No prototype info for non-bulk structures
     backend = run_normalize_for_structure(ase.build.molecule("H2O"))
     assert len(backend["section_prototype"]) == 0
@@ -422,9 +422,9 @@ def test_aflow_prototypes():
 
 
 def test_springer_normalizer():
-    """
+    '''
     Ensure the Springer normalizer works well with the VASP example.
-    """
+    '''
     backend = parse_file(vasp_parser)
     backend = run_normalize(backend)
 
@@ -442,9 +442,9 @@ def test_springer_normalizer():
 
 
 def test_dos_normalizer():
-    """
+    '''
     Ensure the DOS normalizer acted on the DOS values. We take a VASP example.
-    """
+    '''
     backend = parse_file(vasp_parser_dos)
     backend = run_normalize(backend)
 
diff --git a/tests/test_parsing.py b/tests/test_parsing.py
index 65b65c1001..63cd5d0a35 100644
--- a/tests/test_parsing.py
+++ b/tests/test_parsing.py
@@ -132,7 +132,7 @@ class TestLocalBackend(object):
         assert backend.get_sections('section_symmetry', 2) == [1]
 
     def test_section_override(self, backend, no_warn):
-        """ Test whether we can overwrite values already in the backend."""
+        ''' Test whether we can overwrite values already in the backend.'''
         expected_value = ['Cl', 'Zn']
         backend.openSection('section_run')
         backend.openSection('section_system')
@@ -328,7 +328,7 @@ def assert_parser_result(backend, error=False):
 
 
 def assert_parser_dir_unchanged(previous_wd, current_wd):
-    """Assert working directory has not been changed from parser."""
+    '''Assert the working directory has not been changed by the parser.'''
     assert previous_wd == current_wd
 
 
diff --git a/tests/test_search.py b/tests/test_search.py
index b51f05c777..cb8ef43873 100644
--- a/tests/test_search.py
+++ b/tests/test_search.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import List
+from typing import List, Iterable
 from elasticsearch_dsl import Q
 import pytest
 
@@ -25,36 +25,39 @@ def test_init_mapping(elastic):
 
 
 def test_index_skeleton_calc(elastic):
-    calc_with_metadata = datamodel.CalcWithMetadata(
-        domain='dft', upload_id='test_upload', calc_id='test_calc')
+    entry_metadata = datamodel.EntryMetadata(
+        domain='dft', upload_id='test_upload', calc_id='test_calc',
+        mainfile='test/mainfile', files=['test/file1', 'test/file2'])
 
-    create_entry(calc_with_metadata)
+    create_entry(entry_metadata)
 
 
 def test_index_normalized_calc(elastic, normalized: parsing.LocalBackend):
-    calc_with_metadata = datamodel.CalcWithMetadata(
+    entry_metadata = datamodel.EntryMetadata(
         domain='dft', upload_id='test upload id', calc_id='test id')
-    calc_with_metadata.apply_domain_metadata(normalized)
+    entry_metadata.apply_domain_metadata(normalized)
 
-    entry = search.flat(create_entry(calc_with_metadata).to_dict())
+    search_entry = create_entry(entry_metadata)
+    entry = search.flat(search_entry.to_dict())
 
     assert 'calc_id' in entry
     assert 'atoms' in entry
     assert 'dft.code_name' in entry
+    assert 'dft.optimade.elements_ratios' in entry
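
search.flat() flattens the nested search document into dot-separated keys, which the new dft.* assertions rely on. A minimal sketch with a made-up document:

    flat = search.flat({'calc_id': 'test id', 'dft': {'code_name': 'VASP'}})
    assert flat['calc_id'] == 'test id'
    assert flat['dft.code_name'] == 'VASP'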
 
 
 def test_index_normalized_calc_with_metadata(
-        elastic, normalized: parsing.LocalBackend, example_user_metadata: dict):
-
-    calc_with_metadata = datamodel.CalcWithMetadata(
+        elastic, normalized: parsing.LocalBackend, internal_example_user_metadata: dict):
+    entry_metadata = datamodel.EntryMetadata(
         domain='dft', upload_id='test upload id', calc_id='test id')
-    calc_with_metadata.apply_domain_metadata(normalized)
-    calc_with_metadata.apply_user_metadata(example_user_metadata)
+    entry_metadata.apply_domain_metadata(normalized)
+    internal_example_user_metadata.pop('embargo_length')  # is for uploads only
+    entry_metadata.apply_user_metadata(internal_example_user_metadata)
 
-    entry = create_entry(calc_with_metadata)
+    entry = create_entry(entry_metadata)
 
-    assert getattr(entry, 'with_embargo') == example_user_metadata['with_embargo']
-    assert getattr(entry, 'comment') == example_user_metadata['comment']
+    assert getattr(entry, 'with_embargo') == internal_example_user_metadata['with_embargo']
+    assert getattr(entry, 'comment') == internal_example_user_metadata['comment']
 
 
 def test_index_upload(elastic, processed: processing.Upload):
@@ -63,10 +66,10 @@ def test_index_upload(elastic, processed: processing.Upload):
 
 @pytest.fixture()
 def example_search_data(elastic, normalized: parsing.LocalBackend):
-    calc_with_metadata = datamodel.CalcWithMetadata(
+    entry_metadata = datamodel.EntryMetadata(
         domain='dft', upload_id='test upload id', calc_id='test id')
-    calc_with_metadata.apply_domain_metadata(normalized)
-    create_entry(calc_with_metadata)
+    entry_metadata.apply_domain_metadata(normalized)
+    create_entry(entry_metadata)
     refresh_index()
 
     return normalized
@@ -74,10 +77,10 @@ def example_search_data(elastic, normalized: parsing.LocalBackend):
 
 @pytest.fixture()
 def example_ems_search_data(elastic, parsed_ems: parsing.LocalBackend):
-    calc_with_metadata = datamodel.CalcWithMetadata(
+    entry_metadata = datamodel.EntryMetadata(
         domain='ems', upload_id='test upload id', calc_id='test id')
-    calc_with_metadata.apply_domain_metadata(parsed_ems)
-    create_entry(calc_with_metadata)
+    entry_metadata.apply_domain_metadata(parsed_ems)
+    create_entry(entry_metadata)
     refresh_index()
 
     return parsed_ems
@@ -200,15 +203,15 @@ def test_search_quantity(
         elastic, normalized: parsing.LocalBackend, test_user: datamodel.User,
         other_test_user: datamodel.User, order_by: str):
 
-    calc_with_metadata = datamodel.CalcWithMetadata(
+    entry_metadata = datamodel.EntryMetadata(
         domain='dft', upload_id='test upload id', calc_id='test id')
-    calc_with_metadata.apply_domain_metadata(normalized)
-    calc_with_metadata.uploader = test_user.user_id
-    create_entry(calc_with_metadata)
+    entry_metadata.apply_domain_metadata(normalized)
+    entry_metadata.uploader = test_user.user_id
+    create_entry(entry_metadata)
 
-    calc_with_metadata.calc_id = 'other test id'
-    calc_with_metadata.uploader = other_test_user.user_id
-    create_entry(calc_with_metadata)
+    entry_metadata.calc_id = 'other test id'
+    entry_metadata.uploader = other_test_user.user_id
+    create_entry(entry_metadata)
     refresh_index()
 
     request = SearchRequest(domain='dft').quantity(
@@ -228,10 +231,10 @@ def refresh_index():
     infrastructure.elastic_client.indices.refresh(index=config.elastic.index_name)
 
 
-def create_entry(calc_with_metadata: datamodel.CalcWithMetadata):
-    entry = search.Entry.from_calc_with_metadata(calc_with_metadata)
+def create_entry(entry_metadata: datamodel.EntryMetadata):
+    entry = search.create_entry(entry_metadata)
     entry.save()
-    assert_entry(calc_with_metadata.calc_id)
+    assert_entry(entry_metadata.calc_id)
     return entry
 
 
@@ -246,11 +249,13 @@ def assert_entry(calc_id):
     assert results[0]['calc_id'] == calc_id
 
 
-def assert_search_upload(upload: datamodel.UploadWithMetadata, additional_keys: List[str] = [], **kwargs):
+def assert_search_upload(
+        upload_entries: Iterable[datamodel.EntryMetadata],
+        additional_keys: List[str] = [], **kwargs):
     keys = ['calc_id', 'upload_id', 'mainfile', 'calc_hash']
     refresh_index()
     search_results = Entry.search().query('match_all')[0:10]
-    assert search_results.count() == len(list(upload.calcs))
+    assert search_results.count() == len(list(upload_entries))
     if search_results.count() > 0:
         for hit in search_results:
             hit = search.flat(hit.to_dict())
@@ -287,7 +292,7 @@ if __name__ == '__main__':
     def gen_data():
         for pid in range(0, n):
             calc = generate_calc(pid)
-            calc = Entry.from_calc_with_metadata(calc)
+            calc = Entry.from_entry_metadata(calc)
             yield calc.to_dict(include_meta=True)
 
     bulk(infrastructure.elastic_client, gen_data())
diff --git a/tests/utils.py b/tests/utils.py
index 67f8340fab..194da6e4e0 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-""" Methods to help with testing of nomad@FAIRDI."""
+''' Methods to help with testing of nomad@FAIRDI.'''
 
 from typing import Type
 import json
@@ -21,7 +21,7 @@ from logging import LogRecord
 
 
 def assert_log(caplog, level: str, event_part: str) -> LogRecord:
-    """
+    '''
     Assert whether a log message exists in the logs of the tests at a certain level.
 
     Parameters
@@ -35,7 +35,7 @@ def assert_log(caplog, level: str, event_part: str) -> LogRecord:
         The error message we're after. We search the logs matching level if they
         contain this string.
 
-    """
+    '''
     record = None
     for record in caplog.get_records(when='call'):
         if record.levelname == level:
@@ -50,10 +50,10 @@ def assert_log(caplog, level: str, event_part: str) -> LogRecord:
 
 @contextmanager
 def assert_exception(exception_cls: Type = Exception):
-    """
+    '''
     A context manager that can be used to assert that the given exception is thrown
     within the respective ``with`` clause.
-    """
+    '''
     has_exception = False
     try:
         yield
-- 
GitLab