From b375ec8a029e319dee1f6355ef44313712290823 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=99=93=E6=82=A6?=
Date: Mon, 28 Feb 2022 17:25:33 +0800
Subject: [PATCH 1/9] add end-to-end_multimodal_dbmtl

---
 .github/workflows/ci.yml                      |  34 +-
 docs/images/models/e2e_mm_dbmtl.png           | Bin 0 -> 219571 bytes
 .../models/end-to-end_multimodal_dbmtl.md     | 201 +++++++++++
 docs/source/models/rank.rst                   |   1 +
 docs/source/vector_retrieve.md                |   6 +-
 .../python/feature_column/feature_column.py   |  21 ++
 easy_rec/python/inference/vector_retrieve.py  |   4 +-
 easy_rec/python/input/group_rtp_input.py      | 201 +++++++++++
 easy_rec/python/input/input.py                |  62 +++-
 easy_rec/python/input/odps_group_rtp_input.py | 204 ++++++++++++
 easy_rec/python/layers/common_layers.py       |   3 +-
 easy_rec/python/model/e2e_mm_dbmtl.py         | 239 +++++++++++++
 easy_rec/python/protos/dataset.proto          |   7 +
 easy_rec/python/protos/dbmtl.proto            |  24 ++
 easy_rec/python/protos/easy_rec_model.proto   |   2 +
 easy_rec/python/protos/export.proto           |   5 +
 easy_rec/python/protos/feature_config.proto   |  11 +
 easy_rec/python/test/odps_run.py              |   8 +-
 easy_rec/python/test/train_eval_test.py       |   5 +
 easy_rec/python/tools/export_mm_model.py      | 202 +++++++++++
 easy_rec/version.py                           |   2 +-
 .../taobao_fg_e2e_mm_dbmtl.config             | 315 ++++++++++++++++++
 .../vector_retrieve/drop_table.sql            |   2 +-
 .../vector_retrieve/run_vector_retrieve.sql   |   2 +-
 setup.cfg                                     |   2 +-
 25 files changed, 1528 insertions(+), 35 deletions(-)
 create mode 100644 docs/images/models/e2e_mm_dbmtl.png
 create mode 100644 docs/source/models/end-to-end_multimodal_dbmtl.md
 create mode 100644 easy_rec/python/input/group_rtp_input.py
 create mode 100644 easy_rec/python/input/odps_group_rtp_input.py
 create mode 100644 easy_rec/python/model/e2e_mm_dbmtl.py
 create mode 100644 easy_rec/python/tools/export_mm_model.py
 create mode 100644 samples/model_config/taobao_fg_e2e_mm_dbmtl.config

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 8dc01a6e9..4d48134ba 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -52,39 +52,39 @@ jobs:
             var pass_label = null;
             if (labels != null) {
               pass_label = labels.find(label=>label.name=='ci_test_passed');
-            }
-
+            }
+
             var fail_label = null;
             if (labels != null) {
               fail_label = labels.find(label=>label.name=='ci_test_failed');
-            }
-
+            }
+
             if (pass_label) {
               github.rest.issues.removeLabel({
                 issue_number: context.issue.number,
                 owner: context.repo.owner,
-                repo: context.repo.repo,
+                repo: context.repo.repo,
                 name: 'ci_test_passed'
-              })
-            }
-
+              })
+            }
+
             if (fail_label) {
               github.rest.issues.removeLabel({
                 issue_number: context.issue.number,
                 owner: context.repo.owner,
-                repo: context.repo.repo,
+                repo: context.repo.repo,
                 name: 'ci_test_failed'
-              })
-            }
-
-            if (CI_TEST_PASSED == 1) {
+              })
+            }
+
+            if (CI_TEST_PASSED == 1) {
               github.rest.issues.addLabels({
                 issue_number: context.issue.number,
                 owner: context.repo.owner,
-                repo: context.repo.repo,
+                repo: context.repo.repo,
                 labels: ['ci_test_passed']
               })
-
+
               github.rest.issues.createComment({
                 owner: context.repo.owner,
                 repo: context.repo.repo,
@@ -95,10 +95,10 @@ } else {
               github.rest.issues.addLabels({
                 issue_number: context.issue.number,
                 owner: context.repo.owner,
-                repo: context.repo.repo,
+                repo: context.repo.repo,
                 labels: ['ci_test_failed']
               })
-
+
               github.rest.issues.createComment({
                 owner: context.repo.owner,
                 repo: context.repo.repo,
diff --git a/docs/images/models/e2e_mm_dbmtl.png b/docs/images/models/e2e_mm_dbmtl.png
new file mode 100644
index 0000000000000000000000000000000000000000..b3c569a9af8c5f4e5f70f7b150e27379ac8853a0
GIT binary patch
literal 219571
[binary patch data for docs/images/models/e2e_mm_dbmtl.png (219571 bytes) omitted]
z7G~Ccwjz$loPE(zYIJ%;ZSrlOwbI;wVrz}+X7BrNF*sGYy||M%=Gr?>ceYwJv5KkR z6!RRb8CfwK3<*i2F_W4}lPVS-dpuI{enzVP)Uc$^PS8e0)uewLRz31@d0@<=^Vzc> zIsH6Mr|-*(e~7LoPi!bkReF|>@Ow^_wt6-kcwjuA>Bkz<@ld(w6iV)PoKJso9GP5_ zzDeNR(CcpGEYj~ZjIm2ze=%~s{;0F+#g2N3`^@Z zU1C>9B)dBK!MTZ!^(RqP-jnEF+2h6cT&W)F81oh^iYv@Lrv$=~LxvrgR@Xk9&ia_| zz+>2K*e1Ui5!IGm$|1(1q@vSSgUBr9;Ap;Wlh385N_Urz$A0>NOG(8~>Nm%O-!wjMPpS7r5W&AITdz{lQzZS6NbL zlGj^`BCZ*GeX!y1{EyY|P8FlxpD$O{TD_WcnmP6v1bYp4 zR}y}OV@lAzHICjjtFeOBEmGspFpKxpAp{C-aAO))HA^}gnh?0D^U&haghyVTkz?=U z_{7D^hLZs)pZ@DcBc2ZjXwtpAP>9TT*gJA=qtTAn<@WgnJz6y}<&rjg(cJzsr^&wS z;$u@KbxY?x9M%&c2=~FY;WeB>t{*c9?oQLJ4LHRkJ=zCHU!)pFxVg2*CEvCT`N?bP z+il_Of6@sQU#p@Osj#s)*BR%Vu z&2jlf^V%53chJ)pcR=-N*y7^j5}!HVW8BHT>BnEww|yt}eAjB01;lG7&)@LbP_XMt zahc$?oHAecj>oLmn_5!vVA+HB-{~g`iHOYH0mmSw%KHAm9eRN~Gkf&KdYyM&+MOF- z*h}K0ldXO$t-p`a&dFgPa~kaHL*Py=^xG2m?{)PYehq%F;E3jI+LD%(yA6dqHL)I? zSJ59|1T!NGQ7nW9=4WURG^=Eq=^wYOGwzg174KF}xMHd%{9{&o9do2>&k7H61lp1e z3(8IBM6RT}wsvA_kNlsfmX)d7R(d+5KaqNRT$_HjA-i?aJp6s0z<$na>A4pZ-mm;O zqA_(_$?JXguFT005XHyD1wM=`T^q^cf_EYS-1P&+se#*Mh)Ewd*&#)UfAN}jg8$Ub^ zCsvM2Ef28F!1MU@E<68Tp=El1?O^47_hBRF6%q<1YR|f-RrV!6=27Oo ze&85wUYq{6lx}`&N?P9iV}n0;=+E|Ftf6`G_vc<;+xXfo%MWF-aO<|H8vpS5g8x~* zNZF{%);(vR{!3}Mo^{derdRBtR_oWO1wXuKrEAzpv_5RJRWu>@ZP-Mzu2s+QOl%EE z)t)R{JUV)2IyJ@Fecx<}uK!8Tlg-3e>6=-}o(GHmyOMc!@hd5brA58J`Zi7E&N4YW z1)uF)mylR!@Y&{`I2w>T{_I(cl{#xleK%BR-L~R0ctOq`%ji2rr^=>495OiJnaZ*0 z=v>=hRXY`O7&P%>GW*K$F7txmGkyVaBA83Hoznx85*lCC4&0p`LYoXTeKB~ZHk%>4 zp>lP8;l=xK@(@~bYf>N(QqmA(a^DbIowQf9a07)*Gpb~sg0|aGDgrbQ7b}d{<~?nA zJzSK1;CSj~-soY__3>Xnl8=}DkLkNpElcff)d~#mNxx_U-^W9I=A7r_bC>zJ9yvH# zHrRcM=kdRC?*e3 zP%>g`G#^>WrZ{kBDG>RK8O z4_;lD^9^KyvhWNT6q4>Trf#LZmYs8M*trZe$YSYL{c7_e2$U%@4YNl63vVvmkSoHi zXAKz0)w9DpmevoDb_=`FA`5Xo^O<%&pN%G%@*(K;FA{cL{NC-$PTPk^#e>`Qayw~v zKxhCl@4~AwudRFFSo?{2$C3us5*jI6$=&N|JEJS_>^-zt4MV6v3``n6uxZ$s;ye*| z_J6y6k}GxGDL!WFf?QrQ?-k(lI2kA4^w}0^EgzX%ws3jr{Cwws#w!O#4I3YfM%{~$_u4(rc1C;`#K&z7 zrYZdo%PmiT;wf7yn~<;7J64I;FA;(8*i!$w%;}>CMtpP+Uz4)o$d1!tVqmvIu)!kF z{B-?_%wv75_FWTd0_r?q)gco;-zUzxFegO~ei-MoLsNc%y_I2!IS`B1WuAU&J>CC! za1k6;*gG?Ruq$=4*n`f8cKPwUfbq>x_RV| ziMrzRE5q`rjun?PSRA~wA3giET(`7l&(G(+Iu5lt>oIxFC~4Q-#a-mlF3;8yYp@{5 zcN9Az`)DsZ8fe6AXTPziu2f6conpthny3pmq*e*ir5~;-4(7@Fe_~j}G&P(tHneQo zEtuEY|JX5du3G<6`1))UTJmDiBGuzyDSpj;!rX|Lefry`7m%f3&4(e8h9PQ#Ityz* zbis(@w&;Tg;IDlmgsQD31K3@qAv^+Rpv^_2V#$0PVPL}xg%kt{vv4#gmFkeGSYlBb zFs=cFs*PDButQg!X6-`m?mJ}7@`wl?F}>TdnY1_qCeTsHEWFqz4~(0OB81XT;N#YO z>i9{w#lp(UyXzu)mG1Y`q$QUZWcPzFMwnBBSiTV^Co#3l;OTYJUb2;b8b65nFjQvO zTAnkP*6lNe!0|X!8}5Q$>q}Ir8vD@OaFAEx?&0wjSn){i2vGl>i;Ujij}^5hN>0`u z;4%ru(_M_uV6t zU3h(|0$dcgV#R8%KTB?wFC+!|h8{<36pZVTrA>ue37s2xQ7tke^JfGXje$e%hdxK2 z?bp%OtGTJCO`i2wPruJa^E5ReNxDbUu#0(hO&YrJvcVtQZs`-{=`-Uvepn&?L$-h5 zNd<_9kq6*}6KbB@2vLPXICbV-vS12W1`9W5H{ZuS6{7>Kv#sS+nTCfTTUj;qI{qes zbI-ts7NVH2;l#3=d9CK2J7UgLHexxu?$aLO+imJp)#qqd;nsEx>*fR&GI6@n zn}n5jKo#;ZG;43Gm~GM&%!Kn6@AED!6_o5od)6vdjr2QJ6?ye0kI&-Llh;18tn>5p zX8tETYxPG?MTY8@n6cfx)A0$n^}4;oqKQNP*RqHCWp>?%HR82RycV0J6Pwp3rYcH& zi%{P^M;xmTG8>MR)*8x(J?UoQeRQM9a8azs6QmQsYw6v1eq4SpZ!T?Q@8d~} zIq)V*ob%G<>xD{M-6p-uEqRPSEnh_-%iyP3GXKsB~w!;cE{UCH&mDhfbGe+dGrYSZU+EgN))7c?UooPbn3JJ zQ*7~dy)>zY!gaUT=`Ih@n?IY}4#D?7_SzYJ+rIW*6v!Ejkn`C#|Tg&5|%J|5Jos9!>4^n*HeWRgWS=2_M+_TRpcMKGTIEIcfE+m(#ZDwXT(e z!RxP_oI?Hgzm>rQzkB}bI3J-dvwpIVm)FhzIO*q$l}U**H*>pg!DXq2Z(HN98K3>? 
z2rA8m#Ong@`m!`p(Se1i4W(#C71Kr(0@uXKQ%{$ItD*bM0tLs__TIH;lowJZ-I{=9 zC2i5v{3c!EY1dfQn#W21NJkgH)HB$)UTZgQBH!*b^Iz%yt1cx+sAW(`-6O}T-(-y4 z*sq}L#lxbjeQp;Ae3slef!r{QHFQNhceJ_^QxXGc)3Fa(36pTRi6#LJF8N{%Ql{7bGdaR{Cm?# zYq!hk30q$R7q&YEcWaMMUVC-Q4A!i+qRkK%aNG~>SZgFAd z6H`q~KaTF#UATAn>9mYU&62`yA+9i7wRzolEt~Uom!x<@Vac~uQNO+Jdy-zZO?93Y zt1WdRL9CqfV9)zH&?SEU9m0lF%$;j8?oH~>Bi6R2Bbj*%(%u^>TQqUkeQn2q_sc4g zKIu4rHg)Fp$yc!cKBgHkwl_}@a%0qUpy4dGwGLFg=Keb#Kw=Vr(mR)>7b+P%f4t}~ z7qzpE_nC}&m2%6sNuK$TLiP+xXV)dO)Gt^!&^fsJ!LerU%U7?~w8TB2Mx9-P1=C~e zzF7OU;bkyDQlgW@#&#`1tF14{p#=DLrCpv6_#f_YYE)dbVe-BNwB_(y-sPk1mz+98 zLBF=kw!{pMduY;JmR5rDqPvzx{8wr%4}a&xkBL&pjZosW|rwv5X6>`-h<_o z-4;&)^n+vQY~p#}hV8cmvU~et0micx|00@Em|D(P=dqkMBvjJL#C{!K*`x zFGfU79ITLxVa7eO^u6V06uN&z}M~? zFn$9UTaT|taxUMdV%!c%?QCgIMlb9gj8f*+{hS~3=qd*q4Sdsr7NggIYn2W#V%0N)(0Tk|s`=En})Zh;AVjxuk z8QEY$vgN>XCdg{Nx?3;b0s<&bj0U;Rt3A2AkqEhi>9VeF&s2#jWiox{`u#pPzs2`j z635#qn*S_Mowd!V7(#MoO=>xBVlThMZ?U4d!4`85(~W-=-#*>Z>rSBk4g*KBevQ@_(v%Kn(`8qIvPSY?8%INmK_&j^lO3bf%&^;Q zZybCFz{FlX{#RNZ5~m$FaIEE6aZsCCfIDB&fxCGBqQ1#gIPYorEuZ}l{s)3*IcNKf zXYbGC)L||zIupCABT-8`-XIaVsrkNl5qBmh63PAko$>wqr5`?I3@$&e8nLyt)zH#n z=)W1&+^mv%uN43zQE_o`+ukxGm5v}qh*X0;c%Xn86B8B9L2bKZ%)ys9!D;2WV@@Oa zgUcqy#vjVdv&Wp06BD7}iO2L25fMLDD*=|{tJ!O8W`@AdIgC_lg5+oJa7Rme_U!md z!`%5Q{|Z#x0)J>*jiFA__vPjq7^?sK<9G{zgF@~XeIK;XDhW4?%?%4SEC6WUHb1|3 z5_Vn`*w4>mztrNAI56fU}In7CB_`!mV~ z2Beyl3Yq;pzI7a^gtc=00zN@;Z%yL1aK%C-PpZdaxoI3mX3T<&Fz4Ho*kBGQM<{s9 z%tHGR#g(JFO^s5cAPGl(kH^+z6xtggvj(wAig{z-*ATg3b55+8=IoD>v;iL5!$xu6 zAUDsH0;D;)BnotT`8X1&fFWv9E`!hWBq;}FL~>!bC}^muDT9QwzGaY6XO&=TCx_o( z!OVxogTq`}<#o{2ieyyuh)l)EFmjdVc~U6V@9WIcEJ)11U%U@0)zKEn8RH?*RMa-4 z$|~vQ)+>s0${LAc3k~}26pOs4J~8de8fI8P<*f-1%<2a#<$m^}#~!9ht(RKqfgZJg z_Cn`Tg_>f_&w?z2oX0FsZH{tNn4;A#N&)Px~N9wiym0Kp~mV!?f!rqTXO>u7m+NukKURPgr; z9xjISHtJA)L#pXtL9V-4S@OZrr&T1l8!&TiF>-`x$PiO#TXu$z7c5_cv<-E%jv1Fb zsgL)XXuYx@t91j25WvAN=jLw5&>H*}6RknBJ_a-fG_yt`J^`Fx&qk#h0rGKNv^iT$ z-8zg#yS@6308BbT~zOAEXP<1lkGHAd96A`BPBrhxuV^%bTKgpVOAt`&N~C?2 z+P9m`wI`>^CmU5`b9R0+B#bBTXa2~8QoWrVs;^f51Lz_;3*sAcFE6K8V9eqfCMsJ$ z!rntOG0lmUD@fSZt9k)|kGWw;(4-i9r(8udW?^lR4;H7Xq+Fm@!9_rVLV#-XF-z$E zBH?}~BycD{MR7`^sKJBP)($1@@jUI&8A;i)^xtKynHId=N^NAeX>`wExZW4jSw7;3 zJNzaVRq8k*2Ham+tYLwTt!+yV@Q@e;N?1h1`u)8;;7LCsJTQ(6U8x>xqmT6q`f+dk z`7?7SHK%UKg-zi2c|OFLI_^c(TpF~F<2L&?MZx`>VIb934WSLUP;y4PaFf(wQKc*` zga$+~Gddc|@Qgw1?K+F?BqDU~#_GislKkATwYNo%Z6TqDV$5lAX6PD@4-r&@?Ax6mkI;wKN9{2pJQI1ra$ zX3TJ%oSR(sFvVe}mRJggSc>=odzg7O*T6GOPd}3w5BpEi9#a6n-b5D{+9w%50dTqE zHaj2xUL>ivW~ACrcEbt?D{0T5J@~mlhn&YS0MIFBz8TV|hH-Jtz!ZDfu-O@p=mC8# zu{xSKR8=1=(x5qsEiO)x8m6yY)#_Se$CcC)Y(c36Erw`PMyR865kxkcH!p$W#JUYm zYoz9&ulBMugyk-bDF`s*EjC{lrmI*&Q_x1#lMR?i)E3MFd>fRbCn8gk89f>mSXxZl zv#e4wD1rh^1FWV7*%o+0fjKm&)D}5nyd)-OAP^2!Fwp__5E(&YqZ~VA_MeA@Y;I61(2GlTJI_hLDDwr~}p#gI}Ux=bZG2p)dE*J^$?U<>3Q-_m~ zsE=i!ZM%!P&yM`h)K(2MA;|}+a#B-0FBiCU4R?|N%ZO3h@JwP5TIPyzgvPp$S-c++ zp$f8qz1K~8V?l-uE>(!+W&f9usKGna!fiwf%tWM6ZJ1_tVQH9RAxaU1@2KJ;_ZK~@ zJB7SS!N&35)U{hw0GpB*MbX9DT(FG62#XRsuv~DFX-o;caJ#t@^{U}Pld~84h2^u@ zK#*@9z3w_poETaFJ4e;WLY=G?$$^LIW!r}614Xeh>&3!)MRIYxPyqU>U_xpAPZJ0r zCT;f|bG{-Zo)S^i92fM=MEYl#0zeQoEI%KjZ)O-CZothGN(#+=qs@UO;^qNN*N3Y@Q4m5KUIjWX zY&NX{aZqV&h6BGCCC&M{b}CZ2oQ(0|2=q8MhG0Rgh&l*8&p~W$ON#MxdU@0$Q`Jtmtijz$*|gF zSoWNJV;Tl3MQ!MlQ^oFv7ZcW0A;xcYsnIMd&5LdkPCulSfWS!Re&P5G%ZFIEh0_|a z+-GTn<2XiSlL3i==`$0z83z3|J9duH#vYxkEk@iH&;9So;@MQ1L4YJyXtn`!0-@$Z z>NxyXJ+B3q^qK7>*i0S%!w zfUBbvK@!FBXExe!1BGE_GV1AfD9AESTeuBl7c&1^h1tKB1lGZdf|&C1gaYVG8Nk-m zFimO85JeCm!I+|G9cGSfurhA2JTqNtnBpJ80iU!1wENd&INkt#(TA(^{$?L!2IraV 
zfu#Um;s(;eE${CYp&+1rPfYrIb>JHIUQy#N(5^#L1?EG*(H0djBbPNPjz9k&)70zP z(fA%V>0Q2&E}{>22ilWBRe*xL2U*qUqi%Y@pHfHED)Tdg8z}7z%Tso)zVICRbn zUer7fv;XZUGNrHl`TK^J8o2Ay~5F#)&M5&Y#sn~#K07pp*&*1R-^_0hdNM3aNllLQ7{1l{7sw*`Fr5!xXA5+ zCWEZS)r1|!tH9iXSYWQanjFy%R{9T>|EBIguKTyMHlw0qhiNi%V6%mpTVns_f!7U~ zJ&uXSWdLCG-%7g^8SyunZkqsmIbD5m16>W=-6NoruO$L$9Wi#1;$1g*ib^5+Y*K{%yfMo!(Ivt5CJFxG-or+ z49S|asce+V5bfcav_^*D(jf!%-@D%6{#QwMO z;;Af?hCt*GbTvU&{Jqo!L{=mm+<{Wfw1tDqA}SaqG`B7MU1>4)Hai0Pdx3|#x3R$q z5F3q1ZX1K)`#ADi5U#45IboODKGJeoR{uRlj4t4iwg7J^%*;dwL5_|EU@JZR~OF(}w)R)^df*Q)xmO!i5mY)2Zv|sEIzlV$mUY=Wpj+|hw6vovOOARhf zxq&F@{t;yX69og64bwm@Emr|5lE+2$Oz_3ixKYVxT;MBCN0wVfL{Ehv1l(W!$XBn} zwD5TBFkKBZ(&=7;B=^0foz_NpYro2{MP6X$aN*+K$+)cGOBHjAcPA>tRz(^flN5Ux zsEZlGvKT@O?)TY2xho>*VQ*<+h86kUuzd4ANCXz5?0%Hs6H&%5=72 zMZtwXwEmY-P0`_+OA34xb~}Za!RcRc=F99kH~N_KO_mhks{fwgM7}fpIfPS`x1xX( z{d>{I&4o{N(wnn?69|K6UfofEXL{%;;RZ7-gc)6dy?svo&#)@U3+}{E;{Gc$A~gA6 z)MY4@RURjp?q4kUJ%=_XpZRJSbLgSP3Xa&4th9b+G2{jG-D~0AhOYoyf)lWr*?li1 z33qffrVIPTgOaE>7yO&z<@`^N54I-k!H?amtWLj_RAzYnuZzy`7S%e`XPPL?tpbyt zTSz1K{VbPWa6Q&v5KgZ+lj~wAN1d$g8Fj}+$4G~8Ge52Qf+$tyUw?aA-h1m{_A@U3 zz9`Fbfk)i$n~i8O&!-XPrRUs1UADZrHeG1zZd9@ap%;%TJ46lz*Vs#kU)bRh!;Nv} zKMuuW2tLi&H}&5JN(|S`$F%tWYZwl9O2$6z|Ghj%Fbwm}HO0F8e-=k$_YY}K^maQU z)S|&Hnlo}BDZ>#KQ4`Jrzq7+9o1Y06yU`!0$_0tXs0Q zp@L&mSxte_pRo%4c*HW-=wDBp>JYjZA?-F5E~z|{M*aR1EGkKxRhEyuZ$N_@KM*z6nqxmX?# zB=b1R{Q48US3}J_<4?#8l4;eJAefz$o~Xr&R?tH z3e&vgvS(xH$!-A5ADbIWJH0WN3TOk=*BSxhnXdll!4t5Bj!!ILHpnZ%vKn3-Z?teyDf0(!{ojl-_#+5nbh!6>sJVCC>C0y8TPoD+*}bV`>WS6vZ6!-RLXG zS6S9xD$_RF;d#;{iSX8c<*n0KHlWp)X@Tf_`Ta1AX!ByuWl?125{t)Xsw7hNk^v1; z(_zUZwJ!n{(Pu7G5aX>QzL{a6!hipjZhG@eZ|aIi*VSZDN_AXIY+%SfDffID6&d>z zWzA_Cwd+rGmBz3F^?FO%G4v<_-Q=OZGD##27!%`d$hni{G_6nCs{5=a!d z##_+flq!hGpokEXf&-!yWJpBGz+f7q6a-E+(bFT7nkeE_W2-ib7!^fC_^utmTHAB( zz2D#O`TljEN1wcVXYVz9*6^2*}lVU!&Cv1I>Us1V2;Gx;jMlUGzMqF}H) zsH!HiW|rv{)8dk&yR_ADsxUucG=+DauS(?Ce8*GJ~EG?q^6^Ir54hV#T?0?J=SX;)0~anVxYrgi7X5c6>JjNVq~0*VN!xSG_ErS zW;!@YIhJT?e(?LHCQ=Tu9p_Xh8OkrA;+bd}hxA08I~Z5TJXop_(AXF|g}{l3kK92N z1?!m*@>QgEjcec<1rF;aqHbhzf?JF2WJx2}j7cfNVMRqHN@{j0mRG8B^(H80 z1^V%bVW2^ek{@TFvsXB_&df`rH3Y3>qCIg5$y9%I@R-QFRH}b8q6za#lh|PihX~^P zs$Aw33C&aO9T(dPov2%evIO=A;R+~r$euw#vHN0FWLYB@Al8#z zx=}+0!YW5Bfj0&T?E~^Zz-?an#5#>DNFK(GfjVZ)p#Ki*k3!hfiNBdu?87S|Zqj%p zh-73*j2acJa(c!*f}6pwXK}(Z%w;7CalO{xasf^5;uW69KQr;S04lNq?@iPSuyb=u zsO4&h1a_pQe0{9C)?1!mqD(}Fu51vxi*@8Wwaz>dEfe;i=nSf3QiDw>WsN*K*C(fm zLsXpfW?Ldm6r>KVxI!g?p|_U`A=edpPV6fQZr4s7?IL-h&8rm0jE0W4zFCv%6ol#Wol#@aHm z&dC&BlSt;Pz=wklz`?a*=>%1vrHrT^mzym;S>lina?Z9?F&PthLCSJ8#kS{BDa&jg)E zhQR+o0iEv^M_#@I4; zcqyoAu+luako2V52{{sst|5vR28FRW67Wq|Ep;Um!-`DCIM)G39He+FalPge3!TRy z-d>8=Tw)G%Ic&yZbyEWSM(D+j`GW_o8O>QVFU79pXl-|s#GziC+vGN|Oh__iQz7n& zEnHGxi{1bWzN{(x_OaRg0wy&w+47`Q|eB-AIaG&!VnT&`F$C}k)dnCOLQ-k@|$d*X1&>Ui2{ zZRqW#!y%@K*bK$gKfwDa>+cgbw|CLa7tw9S*yL3MtL5C_J5ldLZJ3x*sgGiBOm)3x zmXt#}rgX%LBr$XAS>{^~gE9bCqexb&5-S`v;&9DLnus38qIfM`quzfaAz8>!g{ilw zU1tErIbI?_qDeGFv(ca=G1ZywGjveEN(Psmqku70)LPBSPFAGEyoj?!5mpxoL5#?^prp?WC@{$Cr1u2B6_p~|g`bcJuL)eq zJP2-fTjkivqB6}1q*$fv3@C398pS1`p5)?R$#_ppwb7o`MuUcf`;`u`0FaEi$04rN z;GGZ|U3D&ZU7!rny3*LI5}{nlCn=~DDYuMqBEd^URHN8b|0AVpK8pe(Q6)ZrUPBH; z=?b4`HK;#`IQ-V``LA92r>!>(g``9oJA9Uj-Jx5}#>yIb_qeiD=WuPF2GylO-4jFF zm-#5RDDer&n7qNdiK7(xG!3c^Gj@H6GHj)0p$4D8wsfAWDe~d%_`{tQN{39h1RX%M zn!t;#%hr>tcg=L`{GDs_pkt?|+(&U%>o{>`BAauMOZVdipR+&;_tVU^8X;`$nfaEIFdIGHHzscJhTfn^$EyFwy%UP;tds4 z!Bn1@Y;LFE&+t;vZM8C3S^>ymMB1nkCls5rX?bbp!3t`JE38KifKfYKy%b_$aE3XX zDhrKOZ81ou$s)nxmD9Af2`AVzXTFvW=gNE}!5LUhbpl4FA4UIxEUa^lQ~iio`O^2x 
z$gnVwOld=_=pCUCYM=d))R=^zh!PJEBoWM+Kor~<8Pd?m80D!YSAq-IMp_QcVn@r6 zkHB(u?Sf8i{as^itv86L3-psXz&({pP$faHUKv8N$X8Wlg{x=LV1Y;&p&tM~jUQm5 zX$6^cgtA@*y<9PsEetk=TZiyaDSbr55S^GTbS5m_$Q06H`HEkm0Kb=ono3R@ zZ5+6(K9VBx03NInc1+D*GMybME^PY zsB%?38(j(YD3#jW>*b+=FghvhRXcPtIcCgjk#aXkTTr1?B}nu6j86$`=?bTMZF#A> zw$)N53~Ra8qkhB0di^y~hCyBI8ig}~7eD6}^&&3`J;M_!(zF_aNIwczlk)k9(1%JDC$-go2}DS_@(uHvubX*rxe_5${1Nq* zRkBhwBnUjfpgf6)3o~o(#rgytshznhIkNd85y8WP+U%?Nc;x9_C&NLZ_NjnKBAznMjY*DZb#q;d5+y+lMumwCoxP1!Rg zr8z@Y9OB@MudkqbxxpyD7mx4Cq?O^YkoebKNy98%-@z32s`f%T$;*A_1*}=WzO$cCez6 zvBXFt0G(job>t8QPIB(y5>V2*$ai}7ff$vCUdfiB7hfB(i~f5E4@>APPm6td=1g zgy-~?%qytVafWDy%u4J%;5vqGsT>=aLqi9vQKM@tKx2bZ))k}d1yD%};c*$>8Dbr{ zMnzQBrg-H;)B?l=2?A?pc8nud&Q(F`SS3pv`1LaC&!FmR6Nvcph*NdbY_g5&=r&AY z7(9(D1k&{_({QH~U^Pq&Wdldq#yD)xDUpv1XT2JqSL|H4f@-WZ3060z4hUH;EOhfl z6fc$R$yy~3>0?GqQXAq_d~Cd&7fYF2p`I!aX$gHQ&Pna&@}P7+!EvAlt5?^R!B`Gs zGfi#s%pw6+M64ChR?u50DmWecz@t-3QIl1;lmZ7VtaqtSvJUnt>vLVSlonx_8s|0L zx>*o^ct7Pq>^K+{e6O#YCHFiJQb&QS4I| zri+_K$z!Ceiv$yLGP-5en4R63MqW`Jny%4^4-25|7oM<$K;H>F~>CCh$C9njf}_xvD7ZF2`Z^T<_%QqO3TDL#~jz z&aye}L8T=dVoi-K(+*nZl_>GvupIo9n`+K0Rp2|vIdW6c@MA)^qL!A-X~sR%1_Cz> z)B_j%?{VmLtdR7`e?>j^cTVv(OsvdG03>2tW`0clV|`duMQk5)rVO6?;O7U$^R9(V zn#9_?QuRyaLMCVVxF&k}M=#Aj_SuHa?t#{k^wIYz5eZ%YS-!ET$bk8G1C$kM+P7AL z|Fqit%?jc#pE2jze?wnmCOC|>JSd*hxa`;U9#X+vMp{hm)yew;9Yt%V>A z%P$iC_T6vaJ^L*)>rHIayYgo{cb&W-F|l3JHNWP3cFs=a-+J*zYwfr1Be#oA+2dV~ zT$wS2@3wtk>%{TQG;U!xrs@1S?`_;Yr>b^$Q}Ag0DQwdG8Y`pe+^-CC=Hyf`14}HM zdfVp>P0l%vK@IiVEPw3!&?3Y5Qsnm2^kt8y$12T@UvhV@jk|K?A@F;(j~Twt5Y8OO z*ls$4ZVGaF7!`xUT3lYgE$2(rP!IuxgWg@s7MbMKcViyPzguOJV`!LD$jt9vwLPc) zR7}}d%77Shz+)GC8;4gG)6bvlRwslLP=&OB$4T?aCg>*aSHyU$`p5HpKZr8UKH&?# zr%Cab4L*#IMOhmtjhRGTUIy<+>9*v4g~8?+0yGFP^hcW=$BR%J3u(X@K(G(6s^8sxqbUa`a)uP|^KI?uLP zOpK3l1o(rN$1J-N{mLR^Z8*WR-RYAYxF9A&^2Dlk2N?8~U#%o0Qd{1JgC-sSVj7PgbPb1ZlUfLcV!Osoa! zh!MG#e5F|686dUCqdy;GUb4z&#bcw(E<@Z3W0{N~r*At?p2S^?h#-yOww|i8 zG9qs`aDn!}=Hr@wV%HDM!`XQL9Q^UeZrS10pOdcsBd97Sj(vLT;V(TwG$ro75x9+_MyFZng8CS#vIt(x`sUDn1uo;SoL>Rk_H#vj+qr4Oq=QUygR z;M|3t0P^zb)47FTuCX#Q0AGYeg*WHAKKuj=cDnM)c?x8%Rj)9-u{iY!EwRMTdze$N z58>&-W2ow7{+RluC_U5|$iC~0rT|t>Cg6@SaWP^mr&kuEhLESC@ePQLfc&Wtg7fx$ zdU<_9cQd8kh>%S|%h{(P-XKppOmp!C8$f{goD|HliNN^oVT2r7M%(U~K2WhJ2A?C? 
zjp8yJKO^pDo|tSA>tLaj8o1D01Ei6YxW&`RIb@4VGpk}mpAn6Sgf+VbZu&3A?ug#a zoEx-TV8nOHolf3v=t9F8xNtxE%4OQvR|q{l8%@k)3pY&F<7wn}w;2%;-}o8kd}cVw zHDHzzgz)6^|HPms&vWsed>T4XSGm~Nu9@U;Ae$Ku9nma^sT-#m?Xj@9Y(zxQE}I-= zZ>ao!no+O?D%aH>LSSgJ1rNq9_9oPT$3|qlWcQa#C#@Au4qCo@S~t90Y`0+dsc-(w z%pzk_ZvX_51&UU`8x;~rsDWN#T!GtPa(1-``&)^3ccUS!$oR+raN#ebCh_M4NNXpb zM;Yeury1FTGSV2}hOqx)(zMA@yS-76ehqVq-Ru!D|4Oy!7)VzkhB*P#kjZur+2X!* zp1naN8Gm(i*Egvw|M-?CDS8Y28Sjsc7o>-~aPEZVYNK}Jas)mVpD9BPU4mS=_rbao zCcR+j(gdbJPqz4AR>Dfc{KY@<-#7m@eG(ULwDJhJf~kth2TP3IjNHq2u?Lbz^dy16 zCG5Cou>B?||L!bXI(~Z6oqsLp(1Pz*RHkCL3`-wfrShEvu22qqhRpS+LNJbG!%5s$ z0~c)6V`K7m3&#MMb?4wC4UNeGRon^#m?jDXU;~l!q-g-{+JmkkyC+Yk2H0xq#fUTW}o6z1mm!CHYm9y&)zf&@gEEl~vmfU>YPw>=t-# z&)FlWL9Uj8%-D62Tb~FVWR?p<|M8m zCJh*X>`=BC@uC7JQx61S7K0#=M4j~`^51XF{_dBPX2Wey?``v^9su{_?)l8nfZN)= z3Xz2i{tCg3aa3+y8+gSsZ5krEv}!WHX4)Z`SSQ_&0nq|n(f|y;WZI$0lPyqHySL&E z4P5Lk76YQ)R727vMEro5{K-LAR)HK-3=PEg?Z%KdeS#+OCA;;A?$;np0Oifs71 zfg~1iLe2si$AC$DCZDHFGa^C-GC0(MV6FGU0eAW)ErFph}40h89w1r`xtZxaiYF#Ahq zm^bbl8Rpa@g72|FeLl?yaGh)pmBQ8>I7iL-3KKGEZPo72;3mXBc5gjAIfxSQ7_4aY z36#6Xzj!Rlt_C-r1e4J#ivu8_0Lt4v-Kz{qgf%hXAQh7%$$Q9n@EhbpfX(L!f+@EG z?C@BeUA-G@0|n@cNs~JLA4QFHJzBgEaUMeiyU#FH_8;3E-0f7=G{hp0SL|j6?yLBW zxIA#O1*m);hd2Uw>c=iM?*Aa0Ynpi#l(Ccf4T#6P*l*7vitWLOQIqy`L*Zb%2MAr6 z2`OhR0J5i5gF_rg?f|b9KsZ6!6o5d6xVjNI_!Tr%1s34bt=;I)!Mox5vCwdZaPggH zRIR5K2ol)`7`lXfENliupD-KI`mcojSHX(7`YT6}VS7%+XUOO;qWD8hB2ga@Xb8Z6 zs+|pFhA!h=kZ4CHt1$^7cHBTJe#6*p5MS{~_+}`tfZS`xC4j4e`j~=2@gP_X{$I&N zHi1++oourjN`rrk1;hi0yt_Nv=2~x&lh4c31tEuCrCtv@Hbj4M>VWwSf&JHeb|Tl-SV-oJaZin{T3;A`1Mk}?m>EAQv8FqW&Gl@(o#*Z-4LoP7^$fjPL#P z^|L#dV|UzB*7#>EvU~S<`P(1uh6C$chgjjhzd4Sh`P-~IrFhkLjPmJIeC7M6i9ZZn zU;V?Qid{b2AEQ>E-TbSD=lM_+k>LuP6XzIThV9$8w{c+Oh5`ImDqJJ-E?-M1#?=GW;GpRGTCRym}qX#@psp50^y~EG?M?Biz-)if)VAikc=Fe-oc=2LSBYRF_WyXxQ zw`Khgzv|jKW9H16c(28aJ9gcB7nj-pCUgI;XRT$yc{2ion(v%cv!q{N##X#J_ehu2 z_Bv@z>ivD&ZT(UY%-VA9p#SL0H|)XZ3$EqW?0n468!V20iz!VoiOhK(8y)u(J+Yh^ z-F)Za-Cg~^CT6`({NdLlt9#nA{8z17wY{LAK)T_YQ^o7kHM*%;&!?{0`lF#E-_1BS z?Ar4mx-tsmM+f2`U0=QB*MrMv^T93TDSZqCv0Havd)K$TW?ZkOUwH5%SQCiBE>aov z=wCAWw(I2y&y0NJonPSD+kQK7^!to=DHVeX=WiZG;$DA$lt3W- zcyQ$Y@zKA}x;yy!$r)j~S2h0SZuO1XvaTg%ExX>l_^vBGcYoIKgWJ$q$YzOON9D+G zl>~3^W#Ap~c7b(op53{<^HKTjlv|rN_B8HsYdCjk;K_|^#W_XczWF=P-pgFnkU1dv z;X&b|o~C5CuB?%lKm2lNh1C}MpsL2d!`c5WX$0PJQ*op9wfx+p#rNGk@cqyKlPXKl+;FYhF&daUe&{ zWU;u5cfCH9*fsm)$&-!mo;4$H*MxNO^rkEiRQZ0_o6XnjR$tmy`EHHA@pRqlv$%=G z@GYY@2>MGWT)MR6;t!9?FE!sA@ThpV_}+z|oGVw}_~jSJ(dSpPes*trfgKr$A1y{= zAH>~xENkR#ds#)r!>pD^S%l>E+{+c;e6jHM)+tBh(&p9Cc=c25(hn?bd)sk4^+5r& zd&=sv-EsTJ`IT>1Uayb695M1s#EW(Quh${qA`l+SczyHSyF2|4A!1Ls87Ez@t4 zQpn^zi=H)>>vZf)t(U*@N!*(>kH(+d4(9yy6KUjy=jfq@qc6&KKK?Op@JZWHQvFbc zB&IEM@jd*E*?#{xzJJL-rz zu%qm&2j9+b9DZ>b`Sf{n`$OtfS1Da*R^8bwgLE+Rc>f4TQs2aNY=;D8dt>GJaTN`BnPhmyj}|FcuJb&@{{Qm%c4oi;`~C ze|@(p`AO<;E8_o&cd@*_rnx2KG&VLmdAWO&FKESxaS$^mv*F2 zCB=w0SnCrmy&=af1ct8 zQy;!KXD3zpW!9S=S(m`G6U&paNSo6QrMV}dX$N$g)y?Sb^G3V~URk?+U$>eiPrWcJ zuB%s`L>H@B>;ZN3whN{UPi=b?4uLk978V}Y{MXt83wps^!i|N=Cu~CZfj20$vPw$m%m2 z*Ivjq9j^zjJ|Oa_>zN$}U74?CH^2XS`sFWX&3f>*D{IN3l&3%6efMk~1WA6raqf2& z70{|A_q*@DfDfUole6>9Q){R6-Sl3x;$q$j zuHZ@g{4PjJ9&5jPRGGCTHg86l6jDX!qJF>D*~skI1}?f1az8>^j5B@-pC#qxCz9YW zrMaWM-DJK@W;4cl{CNKSr~aoZ_d@Eq{gygA;}vE0&R@_YI=j(>d#9RxpLD~csq?Eh zYdU@2Z|_|EW{pSdAQQ1Xg3jQe4|pR~3yeDKxZz)Q)I%iw$DN_%V;R?PI^K^#Jf{gs zaib&ee)u_iy3bW;k^2@BcSoV^B7LrhVxVz!&%5t@7bv0o{?r*ZRbn5%jFL-YapRNv zMZftzI<{@SoRPk`(|_zI@RBcnS1yNa461M{Fu0*ML}%S907$KPryI{^tKMxocjp64ZD z5Hpb1o6c;!8FX!dAC|x6<--n;^c#Rlq8=J}g3KyDSD{XX3XKVq($m{N6us+jtAtr# 
zf7a-mtVc+$U0<5pKUCkZdDq(i&SyZi>O+->b*J&cX#i87ii~%!p?rlj^0)r!@q?2+ zvu^HZzq38NO)IK-@z3biLw4`4*!_BSh3D5DrCuNbk}JDLhr3=PMS)F(`4N_!0p@Fh zJQ_mPl{jvP9^!|dr~VBR=D6o)Pxq)2uRjZY%sP=QQL%Vek96JChK#H7rVG&^%}Z9-z=PP{m^9Rw@NkxB_r zN0pS6{0Btbx#3?uALV5Y=Di(leQvm9=gW>^W&E%q_@v*vn_UlITyFReCevno)*M;} zm4`veF~5Jr?zp48ez6{6t0p4N;-H{y1= z!^C)EhKh}(TMr&y?2 zeG&4{PVKddizlFPMPSbwzLfPGrpEoqOfalU%H-;<`%DB)f$G#hExuf|=tBJafq3?t zJ3C~@7WJPiGE4t?%Jz&dYV^UJdayWRrAd%%?*CF0kx;UF^AG!=-ZGuMmXlL|^QL#6 znO#CjU)SyM8=ESJ7VNmqo~oBDx)uJaKVv|R%;`2u(o1#{7~RO5HoM>9iXlQ~rEY3kGHO?e)DLEeyNdGcGE0zl*M2YlG3QwFb^7p| zqr*EM*2T9~%qEp|VbtRC5@*0`JT3#^Hybz1L1-aU<<4K-;(vk};&D@d>$}17dv9%*3|-m( zeFD?eUpLeLN90u<4b8u&-hy<~wrP8{qD_IsH1fa#^?vG&=DW(4 zH+L^zhs4Woc=n$q*CPDO-D@_ky-^=~u0NxH-L;cpF!{~@>Z_mT9$j-DsWKp3cfNlT z-{slz_UK^uRDZ>fP{%afXeoqhQ}WFP(@`_K;h(3z?Y@`JncwA8R1fAO zELwE=V!^Dp-&MZ-0+~=xLFHb!y~FFq*30f^pCfUSK6t_I;lr{m4-c;jUkjm#6nw~2 z;qzJzgNfWb5LN+ z0f-IP@h1V2^TnQq5W`nCuUi0?Oy^huQhG=mw4jnxOb7Mkc zLh5T-XcFpSF*|qDy7_sUt z)!6+rktf$DPzL$@dU(}f+FS=?w}UyRv%Kxc-6<`7!}#Tnq&Z3dZ-@G&4d-qonxfl= zs1M8GiR4&H?*^oFl7vl1B=p@=-!}5*E!6pLT%7TiotIZ5Wo_uS1^R19u$Z`506)3{}}L%IGO$wBSAkQ^$%5x2rzZ;0yg+mU6Yn^1+ktZ#niE9Q=wj zJG=^ch(nh8+IPJus%(eHbHr$;dru#FRwqgPA&R`|z)RwK!9e>}|Yw!qYzF05O!vl7^8<=4q{c zw^|EM>N6v)O#d*meR9UFxksOGI=UuXFPRrY6!}+u@PCK@5{IGDn|pV~uD<^HV&Mb& zoet-{)^j%Q88pT_PB$<{i~1Euga2-NU>oKa6e=JfD0}s=WB2!Sd+iU@p1BcBZSj&M zbcnrf+)d1?n-W>TZoad*?dgTS6!<)Q>kl9CukZiG*;}6Pw*L9rTYX0}8vjJ$R^RK( zOKyKk7r4Cp+U_sTHoyPg>=POE5@p`Z`=exINB>i?p&u>6IqPl2h-a3rZDgGp(S%5b z-SKp^1-mY{%7|;l^>NF_)vHk!f#_g#BJo@_@+XK5xr2;8ri=cI8{Dc~A6E^-lwD`U z;v7e=9Y2*sT;qmZl#A=+R&kQN3_c}2`nZ2|^@!Ym6xzfi3cT07HyeGN^(K1s(dYv@ z`UZ=WL?j2>V03a^J%%tY0a+op&W1gIE}B{+ZX+$2YydJJ!|38nrEZ!QYfTbtI?{i! z#x2`gBLuz*IZo(Mh+I2(e)VsB zzhp#(_($f=EO^s&*T}We;kKpgNhU;4TD}1vsSt#i5-j+015JUM>ZCLT&BJ*qKwxsP z{)!NE8O{ko+(5noEk4E>Kjb@I%lRQn(Yg z@R64X_5~fpxtPMWqF9sU?gPF1a740oWUxyl0tlIxtMagzmwoksHC%SuF&AeMc(8W? 
zb{>;b4XqWr4^`!Y9IV%7BSmff_XZbbJu9!OSCSJ0TP#&_*oG;5lih?3E;k&ABj3pO zovv-EYQ?JP#C4V){%;n z91{370(UmfD+dtF!8x4>K}=MZTcSr0mjcJlRoym2VxCaM|5%p6 zwhQ#;Ih=bqw+gI8Pz44f0t9XdF$JqJ%!AQ-hP+Qj3Ne+sFeCLoQ3iK{NCreW!36;j zr(iHKh*LC2I)Vbiu;7)f*YU%Nqup(z{qM^*x9#^&MiP&IHg2S|?Y-S0Gh;5G&!Ps+ z3PcxG6(V+`8gDWnt_)h?XRNK}kWA#R{DLYDp998&zq#kX#_q7S zb#IogYxKL(2cDzLN7Y$x+Wv7VkJy*(8?ruQ9zWS47(9F5;WTrV6LJ8kdQ_{5s=STs zj#s4+R8D~+=fF+bX=s&4Zm;9%!fflH&=w<>r7AG!iHp#oTE(kM3A7#mVsSoSe*jsF zb+8I{K8;GU@bE^QDGn_t+zDM3~SeXy6@{0L#^bJkbT~7E(2 zLLoEK;Aey?q8UufrE7Sx!Z*-Ps$Ii)8uY#-MuD$adSt?8NQR!0x14)?FitN9MK_+_FulGx-e za@Y?|;3Y|?53GZh9EfuMPH>{hym^pL;3azTjw7GI-R17Qe?tRyHT<A$>0}6g$%Y?4u5-ulZaN(Z4Pr9PAfb(zoRQDRf3`Q1ymb8AKBc9 zlZs=>{pnRp7E9lIW?R_tohrQ~H*1J{xp}?C zR@oN=afEb+>glR>Ce}>u#x|EI9$ph9V3yjb>+2N<6p~V^mPlz%bJoDNNO?#tB!NZf z^fa0*)ZIKd$@1RYfDAZ@gk3w;^-9Tpa%F|uIdmOXay?LJ4Vy9sjpS})+CZ^bUxp;^(Y8sa)c2c2W#m`-}F{|%pZ*SF8b1d$5okRYF~EjJoR zkoti*>!1~*d9ZV;7Gx;Ch^N&xASx~&*l?vp8EmU4^5O0$7e(+&GZ+gJbmV69_!S#L zKN?uwPgSpgTF<-;zNH?9K3q-a!R8ItMWLIRwB-Zg2&|x!+C#Ww2?AMdBu~-Zs~kUo zy-1&O>QOl^2?|@1M3Y7EKF5HP1cQRTOPPbGg*R(pQ_E~tw8oK^-%&$Eww*@lxtYN( z!bneUuX@p6!1 z?V)!vm+K4d*O$e@uuayyG}!aPJzlCj>0LmqGjARW?_fe72$ic&o<{X#!KTsL*&3IW zL-plr^(R>?RHASh83_@mp$3{ddIsmE>SG zlq)aXCjv{VkT?28Yl71DW1K#gCsA_5X4Qh(IIHo!Q5qq|vP)yUoR?V9=c|P1c zE0(%f5!k_^-Q)3d&$M3cQN3Xb`E`2**a9L&)}xmAh;iN@bYNl2dX)lNer!e!M13qG z_6;qyMM%O&%F;N;dw`E?g#zdG?iFge4&})x^MUQUOETup&aTQmjF8>bb>%xuQ#EeXyJAsSvxR~mfU#fn3%~5!( zf9x@E#3&ot*tscEl2$WYgNONCC5}@T_b5@|x^>a)xhaI4MvJ2B_}SWB1cmH6j#I@= z6*_34l@Nz$T0lp!X~C+AJ&maTr7imXqhTmnuZq+MyD5~ES1cgsEZNODe&sjum;krqA4)DI`(_^YcXP2CYRA4qxY^9+m zp_h_ne^5au7h1}>4p=$YzBRfTnk?x7fxWW6q6P&{Dhy*$`&H0h%m~Uhx%hWXB#6*K zYq+E&tl2=zUy}YEolulw5yf*%>cDo^D(8XXe~FEJ+yap+86asA=u!b2N)4!rU{izy zvV*lfnkV6_C$x}&9TqF-uoL$KETmdSgC-Qz zk#de3)E(DjRc`%hy^F?ybJhP5wtE$l)pW-!9*Ui zi52O|iVlV?e$Ye1+e>6Am+-yW4C$CEs?W(hZyAwaV|q~K+N}2 z?eop2n#wt0bu1T69dtv2^Ct-^@;%tBpWd?|m+^rZv}uDrjnFl%*;>nIcjK^93AB;q zw?@v*+zZYTyzzIsY3q|HmRd)e7qVYeVy#_G7~`=HS`t=9m6?@YB=b}!odyIcUMeTO zCShUF(UBz#vw_Z2qOe#|tm!~;sbUK`c=3vjy=lVeWOE6^L(8ut#tOk%$-Ee99ZR?c z`keq1Ad!p93^J7u>anneriW_sCv0XQ3HD^Ysy-KTQJ7|-kY6#^d(&c-WK4gDf$r-9 z4;87FB_ys46@CJYvx>|clNvV7{uvnPZdO(bJ6?gQkB~H61%oc=(@$RmtxZ5wt4Om= z8Fk^%iVE3D+ik54Dpxoq$T?Klp^3W--D`m7M(bcx;<;3qU{h!;hm?L1)-qILCrHv^8ha<*7tnBlEJ;JdhD-W_Tx$)Z zg+q)*Hf>^#vMBw~ce6H5Nf$yVomeBNR?E{c%_y98@H$Jgfm*07b2t~f8zZ||md!o; zN|vq@t{A9=sfF}3Xr*R85UZjMAbU~w8c-x-lruy$Id{dt6zJ15#-V{}8KjV>*2Im` z>QM&2=k)ADU9qx`*3$c!md&W5^(M)p3iZe0TM$H~KIperqI7k-Vg>6nz-8dXMY5zZ z%HKX0cDK^lBJ4mohqQ*b1B&P%&U%xASU=T%4(L$8XP*FSDbneL{BhYqbq|H(l669d z87yiM%rH)DRMFtK{9GR)70}2K5mc@oYF>ewkDs9+5YHtG&Vu7jp;VmnlDJBtb!@$@t{w>9V&_1-0m zj`tSFsGrY+u3RZn?oWBKeW4o}MbKSi9+LwtnWCEyrLGS{4xzW4aIc~si|E-FEe9cMF&fGO-*F6vPSY9O9bFcI@oJ@v{ksYjXU0lD=bc~pmNVwg$A4e}H( zNrW^s);RmRV~Si7Yg)-HZZeNBZ~pfPPR-A|P7baWb1Zd&8RpFlE~{}cWE zlu2L7ZK8Pw9a99`sAZ{vZD(_>w%6{IInkc1vn z6xnA&dk0LS&M^~tY_`bSrfKn6pCR(jcL>)w6l&>y8`QE!^QdwQdA@U99uy=%pbj2q z9K#vV$~Z68lVfKNbr0Py{~o?m|knCNiXO=Ok)IxJ&3Bqa0K!@W?o$WqM}W=Nw%e1$?L91KLJVUjLx z_*5EN<2YLnMK2->3kz*}D^AZg4?bp)%oirr6~(BK=xr7R9y=Wloi`NF6Hk+e^tZ%D zXcqFI^-Mav8Bei`-8~e!3GNz$_^KF`AuUytCKMCQhVAmb7_a;yyvjI}N>Y(;06SR`9G6Djdx`cv)@jRH#& zD>Ec6EX>q_eaNBQS7NuyVnBc$z-7iz78J55p4s9=X+FuFE6~YNuxyuxh1(YhlN>lC z%5rw3#Xvz1Mo&^QNEoOS2&pzIOYLeRMdBlr4H1Y?Oi?^Fd3uosaN~EkVzWyOs9l6!(FaE^s(&{8S)T)7P8e$f^TfDTQWBD=EFD0;zeVTdg-uNjFelqbfO zgCU7xVX9>q!8#-5m<2Io&ID1KP8^5P%QakLEodcM$hmumfit+Q^>Yl4@Cj(Wda zu--$>Z)}`SKd@b=o-f}+#Yf8k5eM^%vAzTU9WMr zy_~YwUPwf*md`YRCY)XnhC~&k&mi!a3fkRY$7ziNQOtWs>^L%$q 
zK2JeI?=VKxufhW9aFl_lz}g_t9bhhp8=C&0K5m&DONlT-r1de^RgV$mLC>nl=ez&E z0056}o^?|_gl{6Od?sew`0(Ikz#|Mlxpr`31^vuD3Qf z?a0MH3Kwp|g>$s0imF1D`A_MZy-tmP&2;n5#v-(2xI*PJ7FNc}FSb^^RO1u8xG>nHYb;T_(8ejj zQs^eOa5hDO-wd2pb6DuJQ5&n_H_PiR>@F#0mWbXihG7J+K!s1BLLJW7I6>%lifrNM zQYp*6!mlWj+B*9ZS!lCv~l(Rq$lIp zK5KhKny~7^89^%CYi>_|xvvD8Dv7Y>AS9tAi&lg`RnZH^Mz_hwg|~p#B9%L3S75pFV6UXkmzKz` z#eCB)nApUY{^AO!l-7!eRL4y`A>;S(~5NK5++2TnKD6{_hrBBG51q6``6 ziMSDjL>u0^F=~-}d)$~bOGLsA#uT1U)GjIXpX4^xampL{3CXMblo{GHW3muF2zAy< zMHCiWslYYE5mdNVs_M@{B`HHvgj?#)Y~Ymlh-B8pa!j2Y@u|YpOLB{wnmA79HA|wG zY8;|^^+!|y(MFDC{O1#9Y9Mf@$X7`z(HhEtU-H~Du$spRu2tbHqIkVYoZ?bt5sR{# zU?hz^Ymi*!ZN7JXt!lA0bkovrt=;6S7GY+|(S-b4EOHWNR`T50)UH+0FcrBAFYSvF z(IN4=vl<*|5Ef$MCZ)Xse((q(Pi47%ekDd!W08Eq8@H^WxK~upE$HP~d2N;i)=@XA zmMLOcHB88oQZi6uN)mL{O?96T@e`b3qL?Q(8*2kLITBcFmiDQ_mCF#J&Z?`fyLz+c zFwM`Z;Z$t?a`j79agUhpC)zxr&Ms4Fq%8GFFE@}=+%#7>MSjp~=>wdWakxj=BP`%h z9-QeFAp_EF8Ynof79z+qL@PfLR(=EJ=EZYXe=G%}(=J#x07X0mFv~_EnmB&Y$ucZ9 zf87{?zl5bbP51TBKk_V*>Ecaf)RR!C+?RGR55@4(qK+x(+JKejNfc<{n<$z>;1Nlp zDRLv$VP6yz#XA%^6}laV5jb^`5k{;i#hxA^MYQN}69;-GlfD%@C9gIK)srew*1|!% z7RSOCDe%iE`?*CvD{MI=rn@%AU4w^S;9NSMi%Ijwi`^Bq9s=mmRZFympK%b8NqDX=jfb$K<3mGj zg+%f~rTrQG;CR|n6Q)FEJ0^OmifGFT6!C=gl^Vxo)S@Q$1n2|ELw?sQRhdHluRn0t zQpV|SR72lgfTx{FiPQ$TXIr0`kz=J0RIBztPwG_&;hEXi7`DAPD@rRUR8%8!<-^NE z!?b6t0HZO1!?d}mG3lk)0ijR6Otq)P@w%W;jq8k@%Ea;n2sN)zTO$Hz40`QJD+NvD zY~bap3D~F-s%%4J6%zsrA*t!*hd%4->dm|{I|BF>Sj%L4r+m2(W;BF@Qs4r(vX6Xf zMZ}y!4&~h7)wJi3NG(0@w&dnk(X_bDLgMK{b!fR4GGGg7+FEw7`|R~FDI%woQwqv2 z((cq?`O<4W;vLnmt@AW9h<@TirKy*jAh~wpbL=)jL9jeBv4FTn1oyS{h-DW|<*8Lt z%hk!1jGT+%>m+v%_9oeI7pcOM@z2OhpTyYmAm_nnor-vy%v=9}T&JQx$YuC-_e@6! z&0^^S)_I=ME|D9JiQ&y^&@DPysI=vhFKJ_LlVv!@CDsWKO)g{UtUT6|rbxjfOL496lv^@R&sZpQswv`m?ul-nvPj6pWBc=9-^m)wy0eonp zb?o2{?s{9v#Ak66XBG>+$woXzX{z-uDfLfE)T!+uQwB{GB$j(i6~x5?f`r%}3YGR6 z)f_E!zjG6D>1&|FD>^xH7rfeoM@J_P9PV`!5m!d`$3d66Lro%KjgqvcIzcD&6*bXT zv(e6!{p;Jsoy`Bk*qaAJ-L((nwnUK`Th=hft{5U@&7MKXE;P36NwV+Kj5TXB*0S$H zD7%cUER|hKmMB8VmhE>&_x;?@^Zh>W`+NVX&&>FobDis4`*oe`7y*NIHs`{7>de9;v@!f1r#q?TeBs^#%lB`EK>~jvjoT&*grt8uO<@)HAme5 zHvF@{Z1_*kAlxvl<5YRBR&uFM>V~sk(rC`G>v~?1rnBCXZ2()oO8DJ8f_JVQC zMU~z8)5WZ3!#U(zMK95X$!5b(?G26f9XyN(Zvt@B3YSd0fUOZlm4qj!FPWcb? 
z{k@86_3AtS8Z4_)GKaTZwODhixX7m5JHLoDzt-?k*xzvQ)cpm=f2`_iakI?uWYNc2 z#d1|`vuOSL&ZLt;76qKRoj&|GtQaM+43{lkUjki{32PZi8Mb6EjWsLI{1;k1o{s+8 z7-qvY3|?hk^*-x7+h2f8Y%Og9N4r1!%u%}@?k|8t{M%iu?mqh~^>W?dDEN19hAnK` zvh+hDYV1{W{T=AxCB%|2EBud$1gUNT<6=aH*Y!D^woFHL?~)+^aykQTpLB7)=Wh9}AO${QIz`u_(R&fcO70?EgSF0VfGyR8W}{W=2WrbrE#* zl!$V!D)I-q8xAHS;ND&jbm`?sz55TW^v^ni-lfBwyyc*pg%!XCxRli6q8q5-i(*o7 z;#TvxsAPfBBw!izVmBGS{`Uf0MxsTk2#maY>+9Olr`Bcl;9u&8;7E9N@btmK;Bgb- zgyUAPZjiqD&hEjY!kYU*Z0k%nIBnN_xcxDx8_zkclL@bbsp&aXozERMDoO%3h4keN zphVOJP|zi_{@F6ZGXew`jGR5PnNGzfr6q$R0<>NRL$Q!VaWs>m>xe0yUvlZXw%M4| zO$RSVMAS#84-H0qm~ynE- zJipt?6JY@!@dW7cWuWp!Cc2p^AFX<_mW&a?bmSiTs9ZJRY&Y-_)&g3vUqsRCp5+1G z6Yf=s0e1Ot-UFx70m4+Ma^7-hFGc|M%af+yt%#+EYCgjMi*=@C|O3uK~(jWP*y;LQMzN>KFRatx+UR|&^krAc_OhSh}8 zS{Y3j)tD2s7ng%Q2L7lj0X6z~*l;cI>;FQNSNZ5{U0NqUxHl~ily#`a%$8|@@iC?` zC3Q=}D0)_}d;q!xxOL@Zf-|Vn6XgH3jPWQYRk@3`3`=lC>3`p2T2_e?&;%Vs5(o=% zkSHZU9hbs{*Bp6T2b|y+C*GBJO6y_$aUI8g700F)I1YFexET+Q{%+eS9NEb4_3An; zU&!8{T0EH7J<4l68ar+Pog+T3mC+8X$rjkx7B#1l)k5QoO@R-^tK^EeajM*2+5-j_ zgMXDVtB0@=iXL!`sVlz91fy5hnPczu1U!H@y`GF?x`)eF|6`of`rS^IS(U2>=2hqY zYk~jHqgKWEuZQTg5jD2x6sAraCnIl6rVp}z>C(xRM$wa{nm1V2v6H3Eek~2UuAP=Y zMuvd-1OEH-hMBOF(I034sGoQq%pxR88~-cdTUQI~P$Jt-+;N@PV^9|vB}!wZ7=;kP zJgaHY`T^Ew3Lx}Ee+JAHU*x57e6@y+_H5mx={i|_ON&z zJbV?BD+Rd~2G#U!-pNbO5=8~SLp<_{I*>^TtA_-kWmuR=la0RZ{whJQW*2LMDP ze~IY71C5PuXb#$Fb}`wzl$FifX|>t=ud+9v9B)`{8z^iUv{)QgDQtHi;B^lobOUDT z5_dOflV9pO-Ha~LT)8P{%wCjP;(#GtkL!e;!^6EUWeb!F>O12{$g zwx$+c%hU;M$cvS-`0U?w(?2Q?JI4Z6jDsI?BchsjQ)!yIk9Wg4*1Hvc%kQAenK~^4 zx=W|OJUV_G@3%q|)Lckk#So9Fi4R=%Km17(ILvXBy5naiJlnjscRl#~ersp&diK%j z3$!&R_#|M-tnatZlpc5oAAJm%)orpOeC^rEujX};(Iej25~Hw{`7a2X!=wOC_eW`3 zx{rUIK(HYXbnK-ic-`!etCol43J1y@>+cnIx7;v#KRP`-jbcE^-lGy$hobkd(u27H z6U3$p%{QVJGixF|`wB(hJu4JE7=HY-c6zb(aL{T?;rN|;z(IUK?jyV$_!R{mWXOG4 zl*GxnZh?q$W*B5J2Ply8_d&bweepl&R5-kS{MKr>==k?wkO{4`s!J#THcI5rG!$Ou zz%m9LlO?3)A^c|m&4ZT8W5M7Vy5M&-V5WoowSC65z`->S;kc7!fX?*BF({ENAp*eb z7F7(noV80p(e_i7qux7l+5%lxp!S^-4 zR&$h)TGjbMkPegOm2~3UjR!}Uz%ixLc)#hfzSB|3(zzsm?dOSL$hvtG*ue((_dk}v}J?Af1T~mf`4UI9-cWq%s!S>_$kMcj0b5B z!h-V%Ms_J7zVrLcAA=M$5lGezTry}m-fx{gt+1;^XdZU(OZWIi>)w^)g6V_)>0diR z+iQDK#x1YE*>LP*3C*bznF7Iabb`(LVeoKj5WFHk7r5r14H{{Tc!NGS`{3BUFs8<8 zzt(EAMq$6EWy`#Eq_nbSY3(4hb&fFS^zZWNjnneKxQ^dvA0)LbH@3V5y{u->2hE&6 zC~w`W5BNQly)9w2DPifqA~C(+JiScHu^UpW8~l4H=yzf8WVgbR2gm++H{4@esC7f= zs9jfp!cnZvBAsW!0qOvqjww|OQn_9?>Mi3K3rpmeS-%vG=blgTR=y%Pt(Wy z!PCcEG{?_^4=ayS+DjdFhnTJ5A9)hHbGw``YMu8SYTdp4e|)cRrX{I|>OtoVXIOlzkMb zr!ry|E0|A6?`8(RJ^gu(ZhDXS_?L0eTM+=X8q|=0hpy{#>Nm_K1c8fpW5*Ayqv&!T zV=c5KPW7g%VSkv}aE{yA;#wBN8%h_Q^y*ImXwL{1sUG6ugWIh3JUQ0)#-@LX1kY{C zA37iFT=OX+ssR_tRI|XW*J~0l1r4>D8 ztS#bJiVgcSaLZnx)k^T8R`9sG!mlm)O?UZ4tApjiy*I(TXIiW;7DfF50G%{hVh#g| zwVML5DSmN(?dl?hQ>j`$S8WmDL5D^ESi%491v^Kjao^*8|5c-X%aL2Rn%BTF-*oF5 zvQzX7p=c<3J62`(8nP?to2@0PPa0<{#wVUc>28rIuBU{_y={=Js+U?fQ6E|GtkEyO zq`ssgu+o27zO_Ef-+jf9I;+qBfctO{G#W-5Kzfi4Fkamd9I60oKxbVb4&`p65H;wd z7v~`6g|&en)k1LnNW>=8T9Lwr@N+mQR0o~=xnghwDlt@yoM-wfuZ7@x3C}}V%C9_> zmlP@!5_8U72%I4UZ`q4M!-)~Gdxm7-kLXP}#XI{r7ie89Z*>~3xy*+0#eeveP2NYJ z#Mg#I81xlW+Wt2XRLHOb_EETaBGPM2bWmSjlT0ihmQ~wtnDpZU;NJPS{ZH9MtJ$8&{XXEEkH#Qs6)n|*$A`%Bf7xF$P ztdEGhO+^K7t+Ou?MdW-z^hl}kI_R1fo|6M>IryqD2YT6Eqs1X3KYY-?)c8Ye z_0{49{bG7L%Yb!Nx0XHoqiy%&RM6Im3)3@@d36soJ9-V8W_({>=EEVa(|KZJEe_jh z#3WNyX@>|s6M?~y5-rk#1&uI%j#LJv*mTE1$q1d>ZWMD6HJ*cXCk#v zBu=nFL=$;lMDl{Ov2n5uISJS!+gYJZrUJH@tLG)sosnBfE zrexStI1f}9LYxUA%~(SrdiI|Q#WAw$b^n_1uNklg2q-BGkS)}jFuy$)fP)+#0DvSC z&oo72uxR3hoz+c7y2ypN)8~G6&B*^IAF{1c3xRg2#F!GMM+KFpzmfLz;<@^uZfGRC 
z9gF@SlmCy8e-=+6nou)p^uffuFI@M+*Y?7AF)%?vU1JH$GBt8TU`0n2!^>8Ag!cy9EF(y&Dn3%S|;j#NMHP+dB@C%~cVRL^g zPE6?pY+QRtPZicykwTR2P1Kx4Q*G!Rdq-14D=V>D2!=^K{Lik{KnNdC@CEkp-#!tx ztN!iZfc}NJ{|XP{L?VRXtQmRR{@FN26rwi-V$BExVgqVpFUomO*dF@VNAc)Clhb|w z!9J6&FxkC6aGRGxlmv+<%!dKn`)@P9JUHmI?RdN*QXk%~65(_8zhLAa2v9Iylww92 z5qAuRi4NguiWK7{!#<=Cef|HtUs@_;C`RIi6|ov7h_9G9rRbqDbIaab;QcMT=eIvaGjLXNm*upHoD3PFbT@7B^lO9{3T@woQKRTGm zK4iwIgoTC@1Ev-61XAJd%)|55Hr?m^{QVsr9cMrTOSVC|9kwfcfnf)BS`rdu*~j|TWA>MIE^So+MIlK1JCAhi0F-5$5;V~p=k zz#r~R5016`HY9Xf1kHMp4i0>v|04}Gb@cA%otADwPq2^NiXlXyDsectfwhiiNH2m; zB}=i7E6KrA5NrYx>Pv!n#Qz*;jd_R>fa!8B@Q_JD5wSgpvru1@JV``bR|wX6BLde+ zkCsiax#x0^i~{kbm(HHifOA77uD1rl`8i4yD&9*^;R2bXDo~_=fDe7QyKtstXf_O+ zxMo2cQp~eA1gAMq3Aaz`t&wJ+LvR+LWka#b^jHs)O-4L@0v|*g6-9&?;DzxrB{&q& z8K`7oRAdp&L1Is{%Q!i6Cgb)AagD-R8)_1k-$RaPMLM*EG+?p zCwKy|6WAT5JCBu3;s*Q-MYPXTibo4Td|}=5rW6oP9aXF;G88NZV2L6mZ3C{_%XHP! zaq-K0Vx9Z3kE#2~=nzox0eS-73^;8Gh`eErn+-pIrONA;1e6XTP6^?BOn`pFMW+++ z@X3BX{twvc$5rs=P=G(hJ}n9WYZ%~n*arT-q%_z{0*JD=?GdL=fILe~5`lgD4~WcU zAObG@TBQ$uvb6#b5*0kyI-nsm{~@4Gl|CeaO2DkRcM6xgL@Z_7mmm4#tbw2=?4u`| zBpvM|1aL%$mL{VJ)mBmT5bW~5bZ7p`fTarOZC)nA7SIh%%u_%}SV>V~|3ONM=vT-r z(HiW@_GLXJS~rToU^RKwLojH75i&FyXS)D4F^MRFJHdvxmmaHWe-5ApBYQ&M{~~Fq zxr$4heHj09U;lq6F0r(Mjq&J}A1k>-3yVI`71LyDTmuqab^p7*r0XR9er3yVK?TrxZTy7?cm1aatZ$!)@adbGy~#e_ z%N`mV>jwR1FSZ;Xu5A)}_iD%{a8i(vKO*0IeZHm5L4K1tu@|18a5Sf|kYa3k8}wpM zPENiHgv@i$lN5H|(uWIV1A_4c6NJzyP=2de7ie`TR~zzqKS(a=Xlb1#^qCFbvO0>| z*tiEHG|zJfJ?I14FwUoig-_Dmni52U_uc}Vh4$5I!}1+h*RO;& z$QZt?FS#6t{6NYXh+DUX;NZNbeVlE5D8STMYnUAm!bmbz&xnmrj`3cOCw996+EvY~ z9z%!C;DppNXr@Xg(~{k7vcYbzf*mCXqUPw4ai2G@+!I@O0%xL92$>S6laPUMxd<->zAEmv&VzWd zRO()65YGp0w+ks+GyZR2hQOBulWwN+ zX@dM1L~H4~2A)*LUrp2D8ic+ORc5KL=Ia<1I)jMwrW*Q`%}YhhOhrMv9x|eBcQcg- zl9TJ5%Wn3}^Hf3J#Spcj;{QJv{Re;5@x z8OBG^<`zQk(D669xXSft)UXlb`%yWhrrZJ~0}w3(u>olGCE#IE3e~ zc}QlnV^=koG_f_mi01PE4}x#-0rxnyP6@z@0P6RYeN?Y_5*d~)T2{$aGh;G}-pf$4 z`dM+QFf3hyFgHs-E#?$>liABq;Ptydbb+lB_9dZG;!InLF0K@W;V8bb_T7CEi2mG8 zm0*G#HY|_=##^Y0vKLf{MGI&{x=e1BrIIKOhifwa>bOgc=*bd#lF0@&G<=qLOG4Ga zn>Y9GT7X|Pc0IP&j4oZuua@mi=OdT?jOdFfh@@q4j-n*Fv&KB5(oMnrU050hD6f4( zH9wi5oB0B@qbtYLnk)TqRy;DNZNA4ESbSJn&oh9IR6=_b+C%t7Rg0OnMpTCl&vPmH zYKEo6=HK5b{LbD3!DtGXT+FGez$)g{xF_eXD1$8lBaopRmgu#Xq6Kt1nKk3z-wQxj zwCGV$6jf?gl@!M;MZvvN1OSkoVSuM1)oU(~GA2=LgFTyghcq_Mgh6SfCgfh5jN(c3 ztTIu;&JFt?%G?BV>%>&e$0#1fQ=?c<<pK_nAVqZre*Iyz7JOjENHq8I_gWb;Oa$bR0dv&Bf zN~sMqzy-F7`E8^M8GeqKT0)hII{rQI7fE4-O>hFi8ro?B6tKB+X$&`Md713P2^8eJQE4QvbK5*l!_F}=Me9000l^N^@1>FcJN)N* zi-onS8;WDzRKsb@T!ig}o&Ctgeo+??LlKX!qq`bt8OIohu3(?lx}uMsQPC#FC~+>_ z{?{U9nUfs)!dxKY7?M9sYx9{LU<8%O#1~GiXWb`@N0Q^&Qh<085zGe#B|UdiQ|XlN zZ^wuAR&nCcRb|hxlW*ub8}`*59a(KJvnSfz;7lE65Wg&=<{lhbvO)It{1fQeFBAFcU!+VWCRK?~N_%#NN7@BxDDH+k9SF!;YV$~A3-t}!&*Yn3b&`>>VVp#OG`@|2MwWh<+tF*mcBgcSKRHx z-~^qTqZ<8fb=Cgv-N!(uEeuUh4=U5h;}dzbUd}|a@{5Q#G*nlMxxMF&WMF4!|FyFN`mA;cm;&!@o>G(~hKLk~2c)c$ z)g#W7WOIXP(WPi6*SIY2SpgdD3dEYj1Mi=Q+3fc4OuNLVErOjF$0{9$magk#m_j+_ z7&l(CW=`$<2;FMVwZ1`Ao1!{(xAV+^sg<_u1?%*xvWc88Uh(Eiq zl96F=!oDnNrQZ8`mC+zMh702&nOE+ge3e6{=X0w3>vw(DkDpdw?#*8uu5r?o{(iq_ z)BXO>rO*pv%yjYX4R&7%!+yw-0(#Z=AfN93uNYbudXgN)kY3>1Qy@?or^KXDiNuIZ z;!qsDMn=uEG}a0WC1o!J^dpdXS-$1rYsWuCb8!U8S^+S}VL8ZLTB5;Af(m<&B+PYh zdanHbRxB+vr%tf9yTYP<)TCOJam^57Efs)1V91F$DA$H;C`cC<5jG-ZY zKB~+~aE?ZyI+X4fcfc~=zVGHPr$c%9Z2cuGC7?S)RhZHe?x$wU`DdKYmbuPL2WzkH z%s<%PH?CzTc2Tw+i{(P-h`{&*7eae4Z zRHE_W3U)vyoK-oTBUYZ4-tV1B2?WE&g9n=b}NnA#`{GCcS7VgLZYT71uV zjkQk-#2G>rXLBx62YkErwx|>dr6G|&eC8Ds`!HbfS-5QTG8`zwDW4I+0TV+_S;iCc zuWe1g$|0|!8_dEAw9rw9zj4({pUmufh{m0v_u_B70U@rEHV+#WyxeQ>B1pdZG_5Bd 
zUr)u3|Fm+SR)wI`D{rZrkBM3}JBth%rts|zOE?Q$jNAMg9B@h$sM5U;+UXNjHJM$% zQ3DpYv^ifczrW)9bdDdWkBYS)5Pmm37-k3Jg$ z#T^<8JydpF4SeyCy)py-LO>wSbKP{y#YC^&Wb}vpct^B<0}~4~H>Au$zboIIwWwIb zqs$ZQ;O+f$VO`0El!S`H1}}Bi_8y!NIPr}L>4Cn+_LmCn;rGXG7^lD3Fa ze=xf@H?54>y;-6h1gIj_Zl3Fk4IExp;YKsRAeE&n!G3r zPffbDk983~!foUqUwBb(dQba@$9>ild_x=Xy`Ue6)y3tVANeJ+?H5~XLnq&W6{?_3 zh|~hU9|tC6g*TW<-9$ZFMvZ_wIj@L?gHh4*G{?2AHFs5E~6Q* z8luihj_GAYt`#Us+S1Pa__SG;~ZH4D^v( zPBna9SsOoFBK0J*5w|9P|1m*}yP7ZU0&zJ9O!B~R=Fk4Oxd|p=-P+o-a%U#|Wf0O} z;yz=$z`bAtnM39QuWIdhL58Iy+UM6pIyMzFSyJdbyi>*xqz^H5Ee2!Bh zkOQ!2iNtmKQ1+1Vc^sP_CoeMvk$5c8rX(O75;B4qyNrtRIm_D}LZoM{qZ&hZR@4`K z0D%gKC&mNCBF@Dc+4Q0?I&KvTBpAc}oqXS=ktD$0?>=VV9(r^A=x1bX-Yl{150}Dn zXKeK>8CcQ!M-G!%+tuT*de`+4^7r=63{2mA`?mkq51Drz<0d^1)0d@!4KDKer+Rkl zp8GIy^$j?R;nBGlEg*#1{E($1)1dCSeBriKW) zbu!^{p_!cu)3@(5QVYE^=Y6_Gf!{QIGjDgc`-*{a$~^89`w#aOK1Q1Hx>s?|!8QW< zogOV7?&7y8TK%`2j3iPv$ItTJaa+mFfwyk~QX!}}xkvkupAV0pW2zI$9>s6f-1ib# zlXpiP-yOREgh5i|B&=dT*3QD~HaEzIa=5zlWFmaYL$!78c<$~J#kaoevRjmRXJY=k zrcw99mB%yRdbvO#Y3kmgAJ7Y(XZ>Xy-M)8A0+Vr}+Vi}D1^pt&nrlJuofx6%D(sG{ z`88nQ46eH5=K3dZ2?@TOFC%wra$-1MK5lCN(!(^kvdVdZZqcpmYpilJn~c07Rsy(T z!-foc#yD7Jub8wKkNx?&b&di{7r>&;7_Wf+Td^lqv9T28J*GPRK4F8;L9(EF=z;xbp* zR@hK_UeLAaQ9R%0*0jU&KsGt)J}v(S3T2-$-)N~1l6c@~z}l;ll=(v%O6WsDz_ox$ z^JWT9aXFGU{xlN>Nzo-;pl=zzBYUzsn%QI}+gKm@qr34EPcD50_Vm4jdB{~uSBE=3 zF+kH_k}u5?32@;)*`ArXa0~AVu>mvle(^@#4d6JHT&J^munKc~i8|HqzJ2f8IFVJ; zIu)bg`JsTkc)Km0rp2(%fR)u|Af4FtINOjmfII22XetVbIECmCg{XTHaMu;VH-J@p zpmTU&*MKhy5S}kR$9^?ho=zIFqg%y!DpgZ`9Tw+8OFp){l zA+r*Q0lI+;)D}Z0U96%AB4-@IYl79!d>?q9J{^DVMsk$L`&7S*mY0~cH}uDcjg7tb z+OmxAZroC_GfhN?67c+5{R(X~M%_wE2}DwWFW}S7Tzjx8;VkV_mc5V~#7pU7|CkF0 zLRgn(bJt8&nbH<56?#KQiS>J#&bw92mAGmZRqCvd)wiliIs=z?Z+@h-SWtJPr>0wa zQeH*XH_t7+q`OA`Rps6 zGrrX)40y)_mR23FmYsLU{f#zgyjPwTDo}&uij5+O%KZnf{t;)LDlyPgVw+G;%J36J zp5RSUPpk{jJp)0voY4I)5eULnG~X9!_3k%&!0|2LL9{>CxP10NTY}&ZX!&1dW~P?* zwxNEK@u^LN_pQR`k4PChT8bs1XrE(MZQHJES)aWUN!3rAe7xDE>L77<)64n$l9qvC z>U^Otht1vlF=q#gfO?b26BpX6tZ$8^sxnao_Ft)p<#puG+0U1@*;Z9)%|~x~tdIJ! zIO!j3)m!BjD#YTYZin`k0%g(3I{37r8tw2P+x9pi2t~U#r9W8vX@96ygQIxx z_SO?ex0Ta>s}HQl!V9ZPDR3*-q+=4^rSALcD(}6IuJs8>UYu>X3UAVFTOTa2Y~FwT zsHLqBwAe+j}!je;6l@JwmW+6!fUQ}7? 
zAmWeH_3%KXq@?s2-MpzH%-A7W=PPv0n$doN){{cinis7hc1gxh-S_Z@$aGV%0(Y({ z9K_6i7B7$9YhREFdR3Dt8)^+4*Wv<(Dj~0T~1ZKgrJpL}@V=s=%3*TA(pCT3@ei+`d=sKu=7d z`)&?}a0#uq#l;#QMkPqF8l3BW6-iU;Ux*uie^AK^_zKT%cyN)xPP64(2R|{~!W9Cu zi8af>j+$fLk*qn^P`G^uFdT66_UK&=EpY7vC5slG81=%eEYBz+bzU6n*kNL-el zrNVHB!tosifUi)MHN{BXTaBsSE){m&LM!v=jQ7aY{j57*+xglg8^OF~DNikPZ_Y|@V0`|E3pX+3n75NC#I}ZSTO}zUyJCWd8xeD=YcA-t?M44z2TD$!ed> z|9Ufp@6OtV8LQwRY^;!wiG@L5#~_NeH2rUM(46liukZ3AWyzXvhujE?TpuQq3te^KQ@MUw}rQ62&l zqPB0-oIuKLefrq$O6BUZ{lu*PQMPVI?`Z9a?6Du;lZ=tJ1;dnsdk$C6XO1YUgb0X= z15n?C0NO?uEeHk^C>=!bFip!%ss!R32)q?AmlgQyTpot-((!k?^ofRww5_*7;}cV= zk`dwEzxQV5%0VcI?&A-i{LF?o^rcTyYhG4hjd#2@YZi5$DrPiRr50LU?WGf`MLeqz zQ^li!!fFq}U3$U%HVqh23IRC}8#!EW-?5G90eK_~zipV=NAkUH|8!};d+44y5uTTO zW}5@z%df0G*+HV)LDfd!bUfAFMdj1XAU70$ii*nNPKgN5hv6HqQ)>+Yn5@SeKW|@; zo;&z_dhG7^iRhP`&y+0!jO1)gcc&Gmx{pSKKEKM3n{IVu2V`^a$GoT~`dBFL1&znA zDkCzm>Q0d?_qqJ$Ocfy97MirZfT7-YW!1Hy)|KL8GGiyEn*?a~(9J8O8$ZRn32CkR z{CGg6>!5rLWGO((#Vq+t;K$VGevZ-?U6p0m@3DR&b-W~3m}N|Je=|IrgI5=C6T-|V zT0?;_kaT_x0sIIow=d~CYlV#p+Iht-5!2(7ZFdL?V+`r(3^+C_5z(-(wzmuVv)MY6 zemYb)(Kmh#48G%l;~Hw7-OSgat;3n5}Y;y*32h2*&@5i+xuVX|nf!`VCkb_y7LN(B$#*JQ;}0kW@51 zO#pe6`eR!Kg$a+CD$zVC25xctiY9BsqFa%jm6k$5TOSXFGDutrLjbqE)(!HF0B%?U z2(G;RS}*zB&Q*f-iShG&YY~vF=*@y=1?MKYe{Rioxr0I=WM#X$E6qBAN8@W}bwxzN z`@I}?p-;k)ZH;N+w1NUs@-mc5RQgk(V~gL|1v4EgC`(pgd3ftV-vl8M_N)Y*b#&Pi zO(-s7eoVBPimwbM5>JN$S%wsY+Su7JpN{+G`45Smb1>1jA_?>1j8v;36kui2u%m!V zg^8@L*m@aAn~xbKUB`p;B2|o?HppT=%9iii;gIeFQsHZ-$MtABmyi0ofX&^fhWZQ| zb!aC%i=F@jv97F`7e@hJa}j4u`;fz=@Yy3dIB;GxP?=egK*v@ZF0*B|BN>e?Y3f!k z9cVOUT;6|+5F*o36C$U_SF-QWr^*T$U#+`x(eR0)zSmx zbzTg0X_64zlWX2IG=%Q$w;uZwuBC+4B}BOXI<7jssCVx|#H+&KHDhP*t(KB}H`h_8 z+vcfXsaeB8B&Y4ilS~NbRPVi{SAClHiK4GMdO5fdS6+E?!MQ@G+To$kPtif7elVF! z)>W;$IalQwIXf2bl-I3g{w`@~C}}XSUhKmqWyEBu!xDRK30IFY@WL>h6ec;@76k?!LQY1nE%6XKHkjOy}izkRDnacmh zS$%=L7KP)eG`dK2;s?d$Q*v9yW8p5OH35sCp^VjB=<4u}p@aDZgQmGu?H`{f zdxz8uUiul+U|#MPa&Xw7cps6G@D|hrY_8tE6nvcT!t7*oY~w&g2PNq^QXl+@pV2&b zPB*Fl5t}?h89}89SGlv>rTb`aGCt1Z>wsJ2?{v{EGAubcl^73s-;aRM!Oy;r6(5U4 z0!O#lHU@y%<#6wQ9k1hCytD-QH5CA+=hz`1Q~Db{3HcP8X}Ru+=GtiX<&TbzGQE+Z zjFG>^0yYc5<%g27p^C6^*^EDy-PzuqzzlMeDgO3ndQnH;>0imz< zNF2J(YjmkrF>S;)K6OygE!B* z@JI5d#3aO=27l@hAlRK2$4i2F#|!(?(`(vN6CCl-BB6v7VC`4JP}2$n>qK*TQenJ~ z$5zD!T90*I1(_~9@&bkyfykUoS72Y*nf9hMe>C}WZM}vQLW1jQj*r{eeMWhXN;l~( z0c~1UOo`}dALK#z6*s$kyo0|bE>dk>0ksFh(+A&0uU@J1T#5G6Xaki0}h#pFcC_jTW(U+)C$7td;3YipkqKlp(WZ#v=$cVTs! zd$-P9y$8b;NG}7or08KSRkPo>6unj&?oc*V_zi#_jLBgko^Q#kPv`eDN6z zR;wH{AIdW=noCcM7Gx0(wIx&FX!X3nPM|#2M>sCnIZ4LD$zrtaC%gwejy6AfSKB>* z_$Z5I?&`k!E}$I&dgU-8eT$*;;{IacYS>e zdtJJbICK6RUSB&u<&^seullbm2w2mLk@m5?O~9TkQ}U|cIDjtJgD~acwO&;!etDCPi+yG zDf0L_dD)v`G%ayK7I9Suu^vyVI1>s{6&T2Rt6a0?pqZO0JB>7kY;yuO{c4S@vEIvL zZ~Ib9b8y7zEZMgmgK>KNEX4iarPi@N8?PuWef!PG8`Am3dqN%-IVj%Lyevwt$1CV! 
z)3iN?)7>|p$Ri}CRZ{S26H zQpvL3$8R6?^`C#apP;wudp}w9@(K=KKfNw>`cWY+okrTjKC2feTdWe(<0`^z+o2d~ zAwOSTo*ARNxH~Gx{UXn1>aMRsV5TF1maKN4aK~udIa)7K{6oNiXj$}7f9P3pm_n2q z8EcY4T&3~9sg?rL^9N1jeS6FhLy|tR0<~epaV6mA?#mE6^0*A_OvNF$@O;!wG!SxA zhd&)Z2W_eW4b+}As0|~v*Mu?N?Tm9f$DVC}Jm(+~!IiM*k}$Y(wZ}NzY0)4AJ36B|E<^bC|+0L+Iwqd8D?%M4U@EZ zcV9r((ZFR|DKchJq28N;{>L6!R}oqx-|!>~!M}woTOK?{8n8hKORW+F)#Cq+B{#(+ za#TWy4eS(EVv58#OUSTx$k-wqSf?mFoyO5IO8ni&zV#tbTTj)ay~6GU6_d;4N8Jh> z*TSwL;7}S39+C~;&8Z%$C+^FjnucFI;v6pPt>=Ykzd1Riv0Y#rqDfXk#$B~GG)$uhs~io^1-&{RR0c# z>_aruM-5mC)ys|93u@ixcW0Cf!3z)#CaKG^9f$FmGt?sNsQurPS<*6{*GX__(TRWr z)capv1>e6T5t6vY`3Yb7opNO+3T3(kV$uTq@UoNEddrxLeMcIV)loY%=;u4l=Mz=W zQa*3f)5v2BS`k59*(|W z&qoMGkT6}y4A*-@f+@YU+8&w9=TLRqMNA!z54mG&1mS#6q-U=f*1v2v-rO7=mkM|7 zrvzDKFS-8tGpe-fOa-s8>)+Y<2KwGB0%gZ`b`MsLBl~_+QCB^R6T(Vv=Cf9wUi{>G z7y49AeGMh3S~a*`V81E*bDqgA%&S@3#cb?N{Z71S;|=RslMAAt-fZf5W43|K*%J+LWLVmoM4}-@k_cDe72isi8E<(OTZaro8f4ThkY&1R+Si<#^+anFNqGebWnrv_X_*ymgL z2JZUUMv9T562N*CFN+z9Nsy3@H{H!iFjcxq@!T|%$Uv9EV6cqyFW?Py(GZBC{V6@J zwq#HNRuVz@ox3d~o`>RT%|%t%g?Ez~U+P#RdlsUG&4)&io`Pyx25eNcF1M!Njz6Em34@ffNw5-)%F}w<~V1 z4Y*OEqqLXJfMUE~mrtR*+sj?}YOkdBps;@B%lmLTHkPazz-v;z4=#`vcGqNN^@IsD z-nk{@+>k!*y+)DneqGp_ty&|b7$JnFwF$K=`F(XrDuT6=>C2l;x#8vS8lft>4lHDy z@2Gy|y$tX_55(xCI3;kuNaeKoA-%R#wxU>7a=0ju8aWaJeLXC0om%td*RJf`ko3}) zcR|(6hU+cBf5l_TanNYYb}C$QYUdljNHdC>BdEJmHt0^6!=pmqUu|TCU5wAI?OWQo z2V+QKA3i47kEv$L2HKusjmyZ$6=Wo{XnK`7iVO?y5HGualR}h-oD3_-)iz8As0T{= zS{TZS?j{BNp)vd6BKygN@6+IqzpWtYC|zaD3@qwuliFMCWy})uXc0izSPvz~=)HK!U`WYKAz%6~R#z!@u57A;Wso zf?r~RA`59xW|xxyDk7A{fNBQt%-L5kxfRDZ1Oj6(rvx>wfEoJ+&KH5Y#f39{eGDeO zWk4phzaJ7u4l)#-pJY=IADp_D@Ok@k!aLr~>pyd%WyU3k)wDb_kOpCZOH&KT)_oLZ z86SI7kl?QusHgx1_bdxyV<3$}Bp}H{2n_Sjp21Wo3~n2pfK5zXa2;5L5uzl0(wH;E z1$bdTGA2mD^$;Q;HNju>otd!c0qEMOmUG_ydG*1@?hIv?oMeCQ=`LxB)Xtj`&nBv> zVl!DkeqTWjeW2${iK)8HsSKe5*=O2lgSoSPi&0=GTt>eE#Y~@V>O+@=l4}TCAU*N~ z7z&ZrxPDGXv_V>PT;6BvANQ(&> zsDlpzQ{HBX?W)|{SpU;ZKL&r!dnU>H`Xm;{A;rXvpS~d>>#=5;+D=s3Ui~rCUO2GP zpB)Db7?p-eeLNj;cwm3i+(K+f(RDZpAjkRW0*RzAzwjg{;_r_HDd@8JSt&Gd3aAt% z3_JpOU&Bdwa1Xd9ZxZ1UO4C-kJ5Pef4flY6jKlpM&re=s??*?w?$wzB-BC6mTe-UG z_WSqm@z!7k8u?vLQ0S*dyS3V~@1~Q%G-J`XH?eRksXe5~23X|6j~%Uz#G;YryzP~( zc2@y)(~*fHG96q5qg}~>G013WP-ALD`z)4v9s3w5$iPiu4J1xfCj>64#cO|$K1ywv zDf|=&sa83tpLh594$+mn(yOc+4>nZ=NqW{62u49Y)SEPY#OHt^kax?;F0hTky&`eo zJ&(=kwLw0FLb+AsITHIBiV>U(=g$E9{S=HhL8l8t;G@9cNHOAqzQ~D(vQ>)K;it&; zxZAC*#Mgp9G5&w7y>(cWTiZ9h6$Av-89GE7hY|)51*E&XO9laH=>|d3p=&6S?iP?P zV*n*3q)QMG>2CPe;NJIh-_P?t@Av)j9mn2s&CGSJb=ErLx8lMC|7FJfujCR%8X?bt zC;ZWtxWC<;o;v%hP#^4)gYE<+?Qq}E>&8*?0lYQ;%^L!Yy-}?MP**8*GtfV_;g^jl z#FsRuBg6ZeLkz$AF`nE*lki5K3^zP3TaS+v!Ay)j7rK_(^dndD(a|KfXn;4NU8uCn z>d718P)yQRj`Ve$i;;0itg!lpz3sH;Wuhozg+(a=S|97(_Y>Ukl)fSKM7yP2$n4A zO^>E|Ct;FwR{Pr&C@E2x)b~Tz1Rl)-J`Nun5uPHEM(MtblUxpbg_venzc#{ z4BTYqWi7&M&w9eV@g!gTRrLfRs7N89^lUA+xoK@N0(jCdixNOpNpE}T)mapjlBI(G zO5q7?V=zk~jxB&dH13O}TGhLdvpY{0u4!xV-J20R@wB&d0THIUZ+dJE3!sk^^qvdc1vUbz%&W=xS1d!W zC4HyFrog5!18F5zR`*qnTC55DBL%Qi}fziv{r#-uLBKkpMBFq4Wm+abwEiH|m;qZUqrv(5{hHar*DX=@=H9q(VZJdKN?Q%&&%j zU65qRLSId$`iDBmV+h#g!o6fEhDsLv$DRogy`QD9k={61bEir)S6E`XUY?$Lq?v<6 zmbzT;^Nq%n`twSWc9tsNm9+Uuw;6c`;9TfxTRfv(dD=mt!%M7hxwwN(lq3@bwBnAJ z1e`h(p``N>L#DF!Qy7VIa3K-GhF>*84CjR;@i-a*`AY%}D;6jLQvUrvL!c8K3tUlK zR?JJ~5U||Yc@y&`z>X03M>W$|(~*1@SG!{YUJ!^)_E^X25BqvwrO?+d8srPHP1)ApFUk(c6~!&QYXbA{ngSguL~z&B$l1+V`MwW&yL?al^Ai z=&6GP3y%nY`x@=V!}QaicaKtMj%YB45^YM1$~=c~p_E@Jv3xE+?JR%iI3pr*wyauZ zJ95u`ZCpA>k7N4_W7H-%!t5>yzBH3r%0)SzCW}t&UsX~VFZ3}_iNmYMZ3TnB-4eOq zPIGe8`RC8uHIyK(wKlojwZM7ONob$U#GdTQAr@prkA4Mecb74Si-p0cXPbw7+A?2Q 
z5L6*Z%u;s{16t1)*KXfVTO2eENC1Uxyi|l(Ub`03`>MO$)1@9R>bYIdgebosPHEl_ zu}1Vv(T6ks*5oO}HGeT6UIt1r(8Y)Ig4eG~Jb!Af0wItpQ_Iogn;+C?xEQIuS+Ow< zee&WjU{`tFS24kfrP559pml%I9t7&sS}u{VKVrbxKwg{Mb-=jr*9(7Hqx8n`iTz)OU_Xz813Qq`6iVH;iF^KSUBh&9wG05DWD9vLL zLng{wVwhM@b69fmZSYc9IMZ z0rn>;5QJaSKiXZTNI%^kROF&3!#BRh`T0-d-?iAL$( zWmXG(^l=39*RpRMXMEn4Upz^w_{tFj;7m}sl7i$4!pxJ`iAt{YR07jmGYwWRGtVu{roVsQOWfNUUm0<-u}gE@ z5Dk37U*5G@>^OciN%eGI6JU!W6c0rN`zi)qUh-!X6M!83ur2U9zBKOx)5mr^iY|gO z|I?mG!LE2Ds7OIAkc_lu`oKp-_u+X6$h|?1_P2e3v>PCQz`kT;fM)e@ zNv}%flnRJPu*QM%>}p7`5fsbOdko@J=ck2|7Z)4tQTAe z%?G(*#|F)h712AEE5|xzqy23Wx+OnA>9z1-ldv@TVY$y&h-Q_5$WDY+{YC{Z2|f$- z)?viOyHpT+B(Yw=>8O&3%)&spWro7Pf#XrkJF&qk5)R0iksKcO5OLMm)iH?}#(c}8 z!zJ1!P)zYNgT;|4@k}2$bP0Q^I`ZnzB0H9!DsE-WgBqV-g+_Wz16_hl~kY-2yfY%6*VP6s;Qymb-XdR*K(R~v9rd$eGBZXqM9idm`q z?yF@L9}!gkZJeig_+J%*Rf0PbYop(Kca75g4(=?~6T6JE3``~D6yKLmE`S2$W$S!w#Ak88F!8%;XUVKu{RSEaIvJK7Er4m5yUi0uUb);SC(dUgeVB5;_x5 zPUu+aK$!E=^m~VpR8A&Y=po*)ZU#+88>!$c6+s}O0UuZZfD471iM+k>(0^grnaE-e%;oivP^|s51xO;yQ=|2(A2faI~DRPi*fn38`fWAs#iiMdjB; zN&L3xvKpo?W4_VOBJ8~WRS67k9XX5*@St}UA91jh3n^$gB8_{Eck<-t{uaAzS zUWas;&5OPI(jG`i>w889vPs`XYFr8tVOhq&zpBFZQwwADcm!i|1!&CQ)n2Ezdl!78 zLJX7*F;bu+9babw6IK|I4rot}Wt3<16Hy>5T=8XLC%IA!WB?Tk3dMqv{x>o?4gnL%eFQNRSkHhW*L%J%Z%}>5P`tiERMe66(#-)c~ZX8e)vzYgA zBcMkb6EPfHy`HBFJ0*O}lQ8mfda&XBodwM1eOzkwO9EN0SV=Dstx_`g{VW-&@GI(O zQo&OCnG`$%DYzeNGWqS^eMF6dCHltptcbv0%X0nS(|!WCfN|#_B+xNP+P;wo%NNA7 zlBDxY`dI*!uF?i3*D&bX;Bc&15|t(kByW23NdhiO{@ye>37}qh2-qPq&Gkz!FEtc8w6}T>n2CO z6nk2AX#~!BGEI*$#?b|4DHwYdQAysGG5Swzq#}?MSO_;@KTkSe7DDeM0EYCpS)&zj z(D-v+*IoQA-4=t1k(D4Ufm%~5Nsa^Z;;aYM%3#1=F(;FkV##+Q38OzVw&qcW+em?6 zI5i;%z;<I*Xk?;^i0+QhT243B4P=M_C`1s*0uIQJMmi_x8 zCu4oio3rBh!B@$LcK7$+fm2X;k_q=PTQR41(heZ#t1aNL!p+2hqr}yj)^wwLvK(ad z#v9(`rboYZ$0}SdPb3N0z8mlfbM(Q@JCD9l&+N*yY()r-)j2V0gYu&%$zL83YeZS3tWP9kYs-Q4Iyye3unX=N1heEWr~b_}7XCmibcn?Vp?-@0GCJiK?<5v>d)w zdN)RHvq>pgFIfL05+_eeD?~c0o8o$G2*oG4Z&PV=-$%+o2`rtg&xCEKH~6XRpo~5h zc}3zw&0Z;H(!fsSrrw3raFB9~2 zSB>lD)`jb}i$UsI`oIFB7=Vc$OgAL2?~`0Ck*EKR!J3C?u!IzQ>}^IG@;iyr z9WRhph2ZH;45D8J5`Oi39th@MfQ^FsgKhu?pv;WmmR>%nyc9qn-no64SY-_+4t4dmr1~V4_9^oh|2`Qai zaF;3>f9n)GiP@5}|BDt<260W(T&GfO2;_F%U>(Dt912g0M$;VRD4J$AO4}VwJO8W& zzzQKUQ|}l`LINZMiztbQk((s+*~WZ!$%^e<{0QNm3k8HH6IV>%bNn^Pz@MX^iD`4cLiArgee#4j#w!pTr#m)X*b6hM-1ERb zqn0-UI~L4^?kuM8Z<+?|;Fq<&e~k>(qa$*H%A>Ksa_`q&wQf~tkiZViB+CRAyh-bV z^QPz5GPYS53WZ!lfvG9aGsV|W722U$-EUK7msF$RiJY6iZAU7d=p^Zgtrgn5#?nO2 ze~oZ0aXz@Ym5}o4uzSO_tGG;Gkq+vFw=;G%2J}|(+@sZon0lmov#|WpXDrol_4kG2 zO*P4&=n0lhqgg6%^?wc=EtsCXk(XD@*_Ibb1||_>Kp~}UYY0$8Gx0|(V5yA$%#2h- z$QKUi4QZY)aAdd-qljlGm(M# z&h18cw}b2<_oJS5d&~-o*HVw@Vv)F*8Mh7~O8>o{39l{YX5amT@*mB$4h&@2Qg(V@ z$ei%LdWs2B&WpAa!fa3$z?bS`fOyyu;hM$_~@?!L5=zb^RZ#5}J=wrUSZz-UT z07J`~PMrbb28I}t>Iy%Y%*I$`g;Ww1F%+i){TzBjDWo7)3Y`_H{}BX589r|2ECnRe z>yG6$MoulNV@x21E=i6s%gv#_bq; zUl!jipNCX9GpSIh;OK~YkyZ0_Hs4A z4kd?-mSVdDi9wG~TLOc<-hzmu;+-DT2T*QNNJYTlq-VsuJ`b zGw^qFk$hlbG5hdJiUXkD5aq8F4@~cWcy*uiaRO9^jw~6e|3p)jgGW(HnuTUb`9WEJ zu7HXZa=MqV-0v%$I2{Jn1ScILL%I159tkSQD#oUi8z?$jy(#J)X4$*b$;>%)yDb`D zlOD|Nv=vr(-J2NrTFv#FnT$BblVcVv<~W@`x%We~)dd$km?V=W1=LLis@4<`Igu_O zuU$mS&BZF-`vFejS;B;*jP`Ae$}f5u&n+lZo*NPXliOo3!DMw=DTKV zn%&w;E3XBPC)~mf*u?dn#iE`462k~evv+PQIyktvphLUKx|oBrfs?0>j`=n_i#_0s z>2$g~QN&Rl`en_{sjY`$sxfj*;FN|%udk2aiDyNM<#j)Y5d#2E{3;<5=&bXOzvWwi zJ$Q9RU;<*C5|Zg1+wyE!9jrGnScsXl_vQU4kLFMI`W47>ezH!zh>3y z^l;C@L_@`6XTc84dJ_xX-NETtIy|&(Ic?7GA6D%cC*(|9x5hBM$G_9xdUOq>qb{G* z{HLFNrt?@8>b`a7(bt5%($n85HCu<7^5>^rs^&_``Ur3qR-xzNnA5$P={7@$@ed%c z3wa4@llHchaj?z>?Dj(-;Lx}vgD*S7iH?u`OnGHh`v%^Ix=dO4fy%cEpqOA?j^xiV 
z^TEl3QP_y-aL4NzFZiuTUI~b&{5PMBHfE<(Y3jdH?L(ySVDZExnoRdKOk09VXs3w0 z9E%Y7nQ`!ulP5kz?{NZ>O`V18I`?Kk6|U7sciMfwQPCL}@>!F=uq$Hzdhk3)N+ai~ zeu_nBLK9adjZn-K+F@Sh&S|tr&fw2XKWi&Nzb}&8DEGYi-Y4T|6@=HO{&Rv-r_^azS+``u@!qKF^%i_3u3sm&lfP6;0>Sr^M@gh0mIzzz8EkDXVuf zSVU8Sg|cMHHwtfaDoPqFbky;0tM4CHTR5NX^nPtv<5$l1{R7R!ikV9VGr0n zs+!(Pkn~^2-Ei${q0H~&c*rd}A9Lcc_d&S!aERP=|M$XuRpEfKIfahCrVAC#Qa#&) zw~s3NCN^5N8#Rv+l69Nop&p{H=)Ut0#_J9C)8NK*-@Pne@`O~8#@YT8$GzH%ajcfp z9^JK+l>HXj$mC=PYY%VBJcO2lY0$vokm=hU&5g#-=23Teg$B3d9h)t^ixx69Ydt#* zZyqfYB&OECv$H5&nGMtzy$EbvOHSV2X79SWp1xNl`Yb;;J~BV8arNYm!0+?Mn}=bo zO}{E+9VM^#w$}XSH5J+LneqNY)3G?^)8SvmtLeXv<)Oc_d)gryFt(6ebAuEJq~TXs zxv<~Y0RQc*0}d?)&sjL_YK}8w*=X-Z@P!D zt6r^cnn+cFu5j(0q`e=s^&Qi~oa9`k0^ZNcZ#7@s4C}I26**|Ao%H+QLn~tAl{-HzL69BgSGG$=jXI!JrBoYL329;Vt{6iuv?(F zcD}07=4LbQ*?3=Tn^mjxwCLH3{3-9p!xx7mv^?KKTMwenC!_UrNr)mc5XNe0-shEl zehW>ehqF`X`>$y?>&xHPHVwF)etCPVWru&w!Rz-?*S)5OxnLK?>75$J^Toc&rImcH zdJF${A=*bJ-$UJN@A9{G8jO5$?#Mov`R#amDBo3VY$|&8=5Bi9s*7p;Y=vypQ7)~p z^~Gd(?)S)(jfb<%QMW7JkH?T{ma3I07(q!wT*^xhf2OytwqAT|_Fp5Po^vr3*!W~y z97!|2a&kw+W+TlSBOI6V(qFrss-o`s)zxX_TpaaoGbOa%2Wys=1}(>>BTe(Aubb9> ziqCki#~wWL9csKI81A{YT_k!wAkZ02WUgQuj4~6C$*>F6un6LTpn%c53ML8SQA4fZ zDVWMp60Zaez$_%k0ta9*C^$R^bz`B3M_!FYf=|Iz4c$;#2)-E4Du+;K?Ou}(0v~BY zS|sd{1LP#v@x@s!jQAE0R8FWx{RfO|*LF_32o?{NPCgt=FGia0_+Jcid7dvw&*N{s zP8XW*Ke=_}7_f~p$35`0H|zOIeA0aIgHcCE==>C9cmx<<5VtzTyO8L=n z;M|&c_l~KKrL%3-H0QlpAlqbUk9n6@%I)?IgJO;m+F zzV5nqfvp5TGktCzpX&Dc%V0ck!>T}8JG?FchUh`l8nX552kz-IG5PyO;i$I_-_JZ~Hoxba?rpQP@As;N zuzzZ#6m5^h}!%?+ain3(`eb;3rq8N`bg`$D1GsnOXQd1*`(k4BmRx=U6=M} zeCv+;+%NDNf}bVd-J|r#Q7x63t76MxdWoZZsrT(i$}aGG`uf0m@F(}Vns;Lzednn$ zSi|w%i=XM9n;&t7H$PzA@mYnR9Be$`G)a_WeA{fg-|qYTwAPY};dKK&2U`zs zs#KMuao)t+!gU=nFML>YTpJc_^G$a)n`;n2hr)F=i~N~bgv#;0(*JN z@9VopCcpJoT<@p(n|<8yJ=GUIITt#Mr)^R+b1Plh5*41Wk@UfYfm8?U-6D=s*GDey zA2s`$*RA|#RiY+6aL z7|LJ~5*jtSI9d3tpMg`P%2fv+3`jIyX~`w$pi-^XF!;phQiJp zrFR6~!W^O!x$YfZoKNleZ*{B#0gSyLUToI8z!=SXuV`=yw#bj-FnoYf7X3#a$!l-GtoWY%DchBD|aP5VAXoYd#h3? 
[... base85 payload of the GIT binary patch for docs/images/models/e2e_mm_dbmtl.png (the E2E_MM_DBMTL model diagram, 219,571 bytes) omitted ...]
zY1?x9*PJDhplprc7RAFj9#(N~ZBHLLGDed=%V~YvWt@9?Eo&-8k+=j5O+6pkLwj0r zXL~wY?Q|j(s;+Nva6k9;ysDEN-5k`}K%>Q+ZL4V8J-s3l-u2QAV{o5k`nB-_YEg06 zTj{PfRjp#s_*$rFPtu%~oxST_;pvkn4GG>3sShQ|aZp#=GAiy=WaJFl*hqusx8oB3Lizqz%Q4r(JqH6K~5!|SSb z|MD@s=bt{v!it^sR5N!37e{op2QWL>+fQ8=vi_TY7q{~6^yM%9LA%>Yq1$+D`djI5XAfd43 z_91Rx><;+){(Ge`#KO|@07u$r;lfZE(X#C9$$g7{EK$4XYuc+#TH;P6OQPQ5!cnK$ zA^!g6Y*OZkuF72>J#FU96NZJR=0`Uu#ykH4`|O6FnzlY9I%gM02(Jub8i?QaBTAHkpy7cf_T3T71@`WrX^Vev3#zPVvXycXi1(1U~^ zvnJoP%c=(La18?$ zM*{RGEbFmvhe&R&ht*ZP+}YhZ6!-IF*v}_~sjr*6R*oT#$*F;DK69vD!twMAgQYt6 zj8rnyo9d%(_5>4GWU~n4w3@5mtjMCg8VA-xMmjO{KSFNcR2jO|&;07zl5;+$qh=*k zy)0P;B__d>%FlfL`nbccKkS7LYo3AmH}d+!V$${IC}EJwd^fdB;|OGzo6}u*OBs1NkMfNVN)9= zd;NtqXU|=I-}~&`I4jVNZ^=)q1dlkm6pG|8Fe5w$pp9#2COqC#8ghp4FP)tkrO&(z z3s@Y~icrjlcBPZOhS+}SFozjG#5wc(P1J*qu(956Eev_Op}J~_?ZXZn>$s|K&t(O) z%TILw;gXfi!GEuz+Hy%zdf3$u=?_-XEq{_qhe-C6_PDRbg&t>>?(BWah?-DGoxTbR zpZ@5mpkT9!xbU3$i3@Qh()PiK$>r0hWoGo4)p|HS%8$&4h$SctFFq4}CXRW|tkyqM*2XX7GJ~V5IaD981Vq1z z+k1XeuLWU${q^M9+7%BTJc`A<_e}V<{Y#nma!?AGPFibrfBo;}bE)|H{Fn9M>Y9%Z z?2IR&2CbTH)dy7SSVt|`%2!`ff@BnBr|pntmwC)}s@M1)YWKL{u&>GySikXO=z zZMP?r^{R>!s!y}}l)M+`#<|U+AUu2G#9b-*0|zbKW_W=y5!Mo%=mpQ!GMsL0y(Abb zdbIWOD@kr&{+)kCS9kjK>9%lp2ZMu`Rs2yg6o%nBfN8bBy+FOyr*u=J?L6YYo;-0L zQ;2os+@x<`EX=YsBMv(xt2*yqmJ@T&yj|bfcC+^Fn8Qq)p{}35_ieLt=G3YC*vHo3 z--5SqpUG_J=j7%vnY^fW@9m=bxtAPumcDU(cFW@#d1LGCc+hHbTIhE9JcZX&gi|xC zdg?jr(tj`ez1@C>_dGo_I)X16sBOQ$>hnH>Gcm_A;jBh1S%HN_?cRS^i~CRR)#jk7 zvvV{=-RU)%Qw;)3eomh~dnqeCede<)FyRMg`ou!;SQ{Xhf5XU}zH41uO86+8*u^ie zT`DMemy^`qW+^u#A2GbZJpG&Ih%NJkFomW5K*fCF*s&gcK3LhwTZ=PT%UfHMXQ-ad zFo~s<)Iyq^q--?2kac>7wE>#+^l8%;2?Z18Olb1!wGQ3Ah15jRqua>d9N*(8?xM3+ zu|^^87fcE0DtfC<2<+#r75_Q9e50C4z;On;Xcj=JNTzAjyaVkFgLMlt{XxwVLug`~ z#_u}2Vu`^~yA9`SGnM~|{KEYw@i2qa*|9^2Nk^QkT)-A+7$ zlhlUfhXHiph6OGjm%KVXa_(7%dbH>|YIcK0&8P6M>siG*9H5yojG8ZYkX9@O@k1h_ zHhEBD-l&JhUYteD=c%YJRp{tR4J1?ef8VOQiAA9TMH5SgE z7n07hpCM7T=&VQeJIg`t{iu47Q0a9pq1uh9f2$ejlR=X}3)0yL1rYcPo7Rqr z$VAqR6H`OA;g`6`&5o-~B*H^zhY!WMl}QWV5$sT4o|B|^W>jvJJZYzW>do!#OKxoV z?f$)cQw@;+EQkT}ZiR(>nx1~_?+2r%X$M3Qccyn<&jt2YmzD=JLol4PE5-P}+Ktz7 zQm-~{{~p~QgQ@qC-=J|&Qw{^#dD)|YU8VB)is-0MadtBPlwb|wnBBUMYtB|JI!l_{ zkSfzh<6r{cXGH8(GdtY3Fepa?hK$@GbUpIdGN8dDCjJzY>W%{?W<0g2Q4+rR^9VDA z4Wo%%l7M4bysGC3Re{vOS#!aL((KO;oXlXCbjg~Qv5fJUgK#NCGWdtx)=)tqUNv4p z;qDa0hP2*ZikWv)pWiqKitIBTKqrFxgtFJwb+XH(BbzbjP6c5D2FAyw}Q2<95f>GTH( zJv_A3W%hu$FZ;#m_VwfQ88FAn3JT?JpZ@@;vYDAvbq&w#&RJ0wBEDzMR0Af5odV?~pNxAj5R$b2tV>G%d+wAMh5|0_|>gzl|=8fYtV{&!z z>Hr<5WAefM=Ui>9;AjWW@&qgVNpM#2#pla2@Xtgp^wyo2qH8&0MlsX(GMOB+nt*eH zu=zW2#!MFNA7|6hiF?wyU+A38qlyCuug8sFkx+H9$D8v%z7J;I#!rTA^lJ06SF z%l)1eyJQA%^EaFER6#UVg;Ops!AbUmDHJgnIfY7s9C=Lp#&H2V)ObHmk%Xr2?J|-g zTV3IX&B_#euBdTiK;V}zT4qa%hEHbj&_C&no0b<{dX;k5nT@X*B*I=*AMbP>}7A& z{WUm`C#d^t(L=y$-CuY8V6p1{`e%^YuYc^e@bIsH9CK&HZ}tDYVK+5){a+m^VCw%G q{O 1: if self._use_embedding_variable: diff --git a/easy_rec/python/inference/vector_retrieve.py b/easy_rec/python/inference/vector_retrieve.py index 917853484..6c10f252a 100644 --- a/easy_rec/python/inference/vector_retrieve.py +++ b/easy_rec/python/inference/vector_retrieve.py @@ -10,14 +10,14 @@ import common_io import numpy as np import tensorflow as tf + try: import graphlearn as gl except: logging.WARN( - 'GraphLearn is not installed. You can install it by "pip install http://odps-release.cn-hangzhou.oss-cdn.aliyun-inc.com/graphlearn/tunnel/graphlearn-0.7-cp27-cp27mu-linux_x86_64.whl."' # noqa: E501 + 'GraphLearn is not installed. 
You can install it by "pip install http://odps-release.cn-hangzhou.oss-cdn.aliyun-inc.com/graphlearn/tunnel/graphlearn-0.7-cp27-cp27mu-linux_x86_64.whl."' # noqa: E501 ) - if tf.__version__ >= '2.0': tf = tf.compat.v1 diff --git a/easy_rec/python/input/group_rtp_input.py b/easy_rec/python/input/group_rtp_input.py new file mode 100644 index 000000000..5959b1dc4 --- /dev/null +++ b/easy_rec/python/input/group_rtp_input.py @@ -0,0 +1,201 @@ +# -*- encoding:utf-8 -*- +# Copyright (c) Alibaba, Inc. and its affiliates. +import logging + +import numpy as np +import tensorflow as tf + +from easy_rec.python.input.input import Input + +if tf.__version__ >= '2.0': + tf = tf.compat.v1 + + +class GroupRTPInput(Input): + """GroupRTPInput for parsing group rtp fg input format. + + Our new format(csv in csv) of rtp output: + label0, label1, grouped features, img, group_size + here the separator(,) could be specified by data_config.rtp_separator + For the feature column, features are separated by , + multiple values of one feature are separated by , such as: + ...20beautysmartParis... + The features column and labels are specified by data_config.selected_cols, + columns are selected by indices as our csv file has no header, + such as: 0,1,4, means the 4th column is features, the 1st and 2nd + columns are labels + """ + + def __init__(self, + data_config, + feature_config, + input_path, + task_index=0, + task_num=1): + super(GroupRTPInput, self).__init__(data_config, feature_config, input_path, + task_index, task_num) + logging.info('input_fields: %s label_fields: %s' % + (','.join(self._input_fields), ','.join(self._label_fields))) + self._rtp_separator = self._data_config.rtp_separator + self._group_sample_separator = self._data_config.group_sample_separator + if not isinstance(self._rtp_separator, str): + self._rtp_separator = self._rtp_separator.encode('utf-8') + if not isinstance(self._group_sample_separator, str): + self._group_sample_separator = self._group_sample_separator.encode( + 'utf-8') + self._selected_cols = [ + int(x) for x in self._data_config.selected_cols.split(',') + ] + self._num_cols = -1 + self._feature_col_id = self._selected_cols[-3] + self._img_col_id = self._selected_cols[-2] + self._group_size_col_id = self._selected_cols[-1] + + logging.info('rtp separator = %s' % self._rtp_separator) + logging.info('group sample separator = %s' % self._group_sample_separator) + + def _parse_csv(self, line): + record_defaults = ['' for i in range(self._num_cols)] + lbl_id = 0 + for x, t, v in zip(self._input_fields, self._input_field_types, + self._input_field_defaults): + if x not in self._label_fields: + continue + record_defaults[self._selected_cols[lbl_id]] = self.get_type_defaults( + t, v) + lbl_id += 1 + + # the actual features are in one single column + record_defaults[self._feature_col_id] = self._data_config.separator.join([ + str(self.get_type_defaults(t, v)) + for x, t, v in zip(self._input_fields, self._input_field_types, + self._input_field_defaults) + if x not in self._label_fields + ][:-2]) + + fields = tf.string_split(line, self._rtp_separator, skip_empty=False) + fields = tf.reshape(fields.values, [-1, len(record_defaults)]) + + sample_fields = [] + for idx in range(len(record_defaults)): + field = tf.string_split( + fields[:, idx], self._group_sample_separator, skip_empty=False) + sample_fields.append(field.values) + sample_fields.append(fields[:, -2]) + sample_fields.append(fields[:, -1]) + + labels = [sample_fields[x] for x in self._selected_cols[:-3]] + + # only for 
features, labels excluded + record_defaults = [ + self.get_type_defaults(t, v) + for x, t, v in zip(self._input_fields, self._input_field_types, + self._input_field_defaults) + if x not in self._label_fields + ] + # assume that the last field is the generated feature column + logging.info('field_delim = %s' % self._data_config.separator) + fields = tf.string_split( + sample_fields[self._feature_col_id], + self._data_config.separator, + skip_empty=False) + tmp_fields = tf.reshape(fields.values, [-1, len(record_defaults) - 2]) + fields = [] + for i in range(len(record_defaults) - 2): + if type(record_defaults[i]) == int: + fields.append( + tf.string_to_number( + tmp_fields[:, i], tf.int64, name='field_as_int_%d' % i)) + elif type(record_defaults[i]) in [float, np.float32, np.float64]: + fields.append( + tf.string_to_number( + tmp_fields[:, i], tf.float32, name='field_as_flt_%d' % i)) + elif type(record_defaults[i]) in [str, type(u''), bytes]: + fields.append(tmp_fields[:, i]) + elif type(record_defaults[i]) == bool: + fields.append( + tf.logical_or( + tf.equal(tmp_fields[:, i], 'True'), + tf.equal(tmp_fields[:, i], 'true'))) + else: + assert 'invalid types: %s' % str(type(record_defaults[i])) + + field_keys = [x for x in self._input_fields if x not in self._label_fields] + effective_fids = [field_keys.index(x) for x in self._effective_fields] + inputs = {field_keys[x]: fields[x] for x in effective_fids[:-2]} + + for x in range(len(self._label_fields)): + inputs[self._label_fields[x]] = labels[x] + inputs['img_path'] = sample_fields[-2] + inputs['group_size'] = sample_fields[-1] + return inputs + + def _build(self, mode, params): + file_paths = tf.gfile.Glob(self._input_path) + assert len(file_paths) > 0, 'match no files with %s' % self._input_path + + # try to figure out number of fields from one file + with tf.gfile.GFile(file_paths[0], 'r') as fin: + num_lines = 0 + for line_str in fin: + line_tok = line_str.strip().split(self._rtp_separator) + if self._num_cols != -1: + assert self._num_cols == len(line_tok), '' + self._num_cols = len(line_tok) + num_lines += 1 + if num_lines > 10: + break + logging.info('num selected cols = %d' % self._num_cols) + + num_parallel_calls = self._data_config.num_parallel_calls + if mode == tf.estimator.ModeKeys.TRAIN: + logging.info('train files[%d]: %s' % + (len(file_paths), ','.join(file_paths))) + dataset = tf.data.Dataset.from_tensor_slices(file_paths) + if self._data_config.shuffle: + # shuffle input files + dataset = dataset.shuffle(len(file_paths)) + # too many readers read the same file will cause performance issues + # as the same data will be read multiple times + parallel_num = min(num_parallel_calls, len(file_paths)) + dataset = dataset.interleave( + tf.data.TextLineDataset, + cycle_length=parallel_num, + num_parallel_calls=parallel_num) + if self._data_config.chief_redundant: + dataset = dataset.shard( + max(self._task_num - 1, 1), max(self._task_index - 1, 0)) + else: + dataset = dataset.shard(self._task_num, self._task_index) + if self._data_config.shuffle: + dataset = dataset.shuffle( + self._data_config.shuffle_buffer_size, + seed=2020, + reshuffle_each_iteration=True) + dataset = dataset.repeat(self.num_epochs) + else: + logging.info('eval files[%d]: %s' % + (len(file_paths), ','.join(file_paths))) + dataset = tf.data.TextLineDataset(file_paths) + dataset = dataset.repeat(1) + + dataset = dataset.batch(batch_size=self._data_config.batch_size) + + dataset = dataset.map( + self._parse_csv, + 
num_parallel_calls=self._data_config.num_parallel_calls) + + # preprocess is necessary to transform data + # so that they could be feed into FeatureColumns + dataset = dataset.map( + map_func=self._preprocess, + num_parallel_calls=self._data_config.num_parallel_calls) + + dataset = dataset.prefetch(buffer_size=self._prefetch_size) + + if mode != tf.estimator.ModeKeys.PREDICT: + dataset = dataset.map(lambda x: + (self._get_features(x), self._get_labels(x))) + else: + dataset = dataset.map(lambda x: (self._get_features(x))) + return dataset diff --git a/easy_rec/python/input/input.py b/easy_rec/python/input/input.py index 500c6ed95..d12777383 100644 --- a/easy_rec/python/input/input.py +++ b/easy_rec/python/input/input.py @@ -4,6 +4,8 @@ from abc import abstractmethod from collections import OrderedDict +import cv2 +import numpy as np import six import tensorflow as tf @@ -142,7 +144,8 @@ def get_tf_type(self, field_type): def create_multi_placeholders(self, placeholder_named_by_input, - export_fields_name=None): + export_fields_name=None, + img_fea_dict=None): """Create multiply placeholders on export. Args: @@ -150,6 +153,7 @@ def create_multi_placeholders(self, otherwise the placeholder name if input_XX. Default: false. export_fields_name: TagFeature / SeqFeature list that needs to be converted into 2D placeholders when exporting. + img_fea_dict: img feature info, only for e2e_mm_dbmtl. """ self._mode = tf.estimator.ModeKeys.PREDICT effective_fids = list(self._effective_fids) @@ -168,6 +172,12 @@ def create_multi_placeholders(self, logging.info('multi value input_name: %s, dtype: %s' % (input_name, tf_type)) finput = tf.placeholder(tf_type, [None, None], name=placeholder_name) + elif img_fea_dict and input_name == img_fea_dict.get('input_name', None): + width = img_fea_dict['input_shape'].width + height = img_fea_dict['input_shape'].height + channel = img_fea_dict['input_shape'].channel + finput = tf.placeholder( + tf.float32, [None, width, height, channel], name=input_name) else: ftype = self._input_field_types[fid] tf_type = self.get_tf_type(ftype) @@ -442,6 +452,48 @@ def _preprocess(self, field_dict): if parsed_dict[input_0].dtype == tf.string: parsed_dict[input_0] = tf.string_to_number( parsed_dict[input_0], tf.int32, name='%s_str_2_int' % input_0) + elif feature_type == fc.ImgFeature: + + def _load_img(img_paths): + img_feas = [] + for img_path in img_paths: + if isinstance(img_path, bytes): + img_path = img_path.decode('utf-8') + if tf.gfile.Exists(img_path): + img_fea = np.asarray( + bytearray(tf.gfile.FastGFile(img_path, 'rb').read())) + img_fea = cv2.imdecode(img_fea, cv2.IMREAD_COLOR) + else: + img_fea = np.zeros(shape=(224, 224, 3)) + img_fea = img_fea.astype(np.float32) + img_feas.append(img_fea) + img_feas = np.array(img_feas) + return img_feas + + field = field_dict[input_0] + if (len(field.get_shape()) == 1): + img_fea = tf.py_func(_load_img, [field], Tout=tf.float32) + parsed_dict[input_0] = img_fea + else: + parsed_dict[input_0] = field + elif feature_type == fc.SampleNumFeature: + + def _repeat_sample(sample_nums): + sample_num_feas = [] + idx = 0 + for sample_num in sample_nums: + if isinstance(sample_num, bytes): + sample_num = sample_num.decode('utf-8') + if isinstance(sample_num, str): + sample_num = int(sample_num) + sample_num_feas.extend([idx] * sample_num) + idx += 1 + sample_num_feas = np.array(sample_num_feas) + return sample_num_feas + + sample_num_fea = tf.py_func( + _repeat_sample, [field_dict[input_0]], Tout=tf.int64) + parsed_dict[input_0] = 
sample_num_fea else: for input_name in fc.input_names: parsed_dict[input_name] = field_dict[input_name] @@ -570,9 +622,15 @@ def _input_fn(mode=None, params=None, config=None): export_fields_name = export_config.multi_value_fields.input_name else: export_fields_name = None + img_fea_dict = None + if export_config.img_input_name and export_config.img_shape: + img_fea_dict = { + 'input_name': export_config.img_input_name, + 'input_shape': export_config.img_shape + } placeholder_named_by_input = export_config.placeholder_named_by_input inputs, features = self.create_multi_placeholders( - placeholder_named_by_input, export_fields_name) + placeholder_named_by_input, export_fields_name, img_fea_dict) return tf.estimator.export.ServingInputReceiver(features, inputs) else: inputs, features = self.create_placeholders(export_config) diff --git a/easy_rec/python/input/odps_group_rtp_input.py b/easy_rec/python/input/odps_group_rtp_input.py new file mode 100644 index 000000000..845d125e1 --- /dev/null +++ b/easy_rec/python/input/odps_group_rtp_input.py @@ -0,0 +1,204 @@ +# -*- encoding:utf-8 -*- +# Copyright (c) Alibaba, Inc. and its affiliates. +import logging + +import numpy as np +import tensorflow as tf + +from easy_rec.python.input.input import Input +from easy_rec.python.protos.dataset_pb2 import DatasetConfig + +try: + import pai +except Exception: + pass + + +class OdpsGroupRTPInput(Input): + """GroupRTPInput for parsing rtp fg input format on odps. + + Our new format(csv in table) of rtp output: + label0, label1, grouped features, img, group_size + For the feature column, features are separated by , + multiple values of one feature are separated by , such as: + ...20beautysmartParis... + The features column and labels are specified by data_config.selected_cols, + columns are selected by names in the table + such as: clk,features, the last selected column is features, the first + selected columns are labels + """ + + def __init__(self, + data_config, + feature_config, + input_path, + task_index=0, + task_num=1): + super(OdpsGroupRTPInput, self).__init__(data_config, feature_config, + input_path, task_index, task_num) + logging.info('input_fields: %s label_fields: %s' % + (','.join(self._input_fields), ','.join(self._label_fields))) + + def _parse_table(self, *fields): + fields = list(fields) + + label_record_defaults = [ + t for x, t, v in zip(self._input_fields, self._input_field_types, + self._input_field_defaults) + if x in self._label_fields + ] + sample_fields = [] + # label + for idx in range(len(label_record_defaults)): + field = tf.string_split( + fields[idx], + self._data_config.group_sample_separator, + skip_empty=False) + if label_record_defaults[idx] in [DatasetConfig.INT32]: + field = tf.string_to_number(field.values, tf.int32) + elif label_record_defaults[idx] in [DatasetConfig.INT64]: + field = tf.string_to_number(field.values, tf.int64) + elif label_record_defaults[idx] in [DatasetConfig.FLOAT]: + field = tf.string_to_number(field.values, tf.float32) + elif field.values.dtype in [DatasetConfig.DOUBLE]: + field = tf.string_to_number(field.values, tf.float64) + else: + field = field.values + sample_fields.append(field) + # features + field = tf.string_split( + fields[-3], self._data_config.group_sample_separator, + skip_empty=False).values + sample_fields.append(field) + # pic_path + sample_fields.append(fields[-2]) + # group_size + sample_fields.append(fields[-1]) + + labels = sample_fields[:-3] + # only for features + record_defaults = [ + self.get_type_defaults(t, v) + for x, 
t, v in zip(self._input_fields, self._input_field_types, + self._input_field_defaults) + if x not in self._label_fields + ][:-2] + logging.info('field_delim = %s' % self._data_config.separator) + fields = tf.string_split( + sample_fields[-3], self._data_config.separator, skip_empty=False) + tmp_fields = tf.reshape(fields.values, [-1, len(record_defaults)]) + fields = [] + for i in range(len(record_defaults)): + if type(record_defaults[i]) == int: + fields.append( + tf.string_to_number( + tmp_fields[:, i], tf.int64, name='field_as_int_%d' % i)) + elif type(record_defaults[i]) in [float, np.float32, np.float64]: + fields.append( + tf.string_to_number( + tmp_fields[:, i], tf.float32, name='field_as_flt_%d' % i)) + elif type(record_defaults[i]) in [str, type(u''), bytes]: + fields.append(tmp_fields[:, i]) + elif type(record_defaults[i]) == bool: + fields.append( + tf.logical_or( + tf.equal(tmp_fields[:, i], 'True'), + tf.equal(tmp_fields[:, i], 'true'))) + else: + assert 'invalid types: %s' % str(type(record_defaults[i])) + + field_keys = [x for x in self._input_fields if x not in self._label_fields] + effective_fids = [field_keys.index(x) for x in self._effective_fields] + inputs = {field_keys[x]: fields[x] for x in effective_fids[:-2]} + + for x in range(len(self._label_fields)): + inputs[self._label_fields[x]] = labels[x] + + inputs[self._input_fields[-2]] = sample_fields[-2] + inputs[self._input_fields[-1]] = sample_fields[-1] + return inputs + + def _build(self, mode, params): + if type(self._input_path) != list: + self._input_path = [x for x in self._input_path.split(',')] + + # record_defaults = [ + # self.get_type_defaults(t, v) + # for x, t, v in zip(self._input_fields, self._input_field_types, + # self._input_field_defaults) + # if x in self._label_fields + # ] + record_defaults = [ + '' for x, t, v in zip(self._input_fields, self._input_field_types, + self._input_field_defaults) + if x in self._label_fields + ] + + # the actual features are in one single column + record_defaults.append( + self._data_config.separator.join([ + str(self.get_type_defaults(t, v)) + for x, t, v in zip(self._input_fields, self._input_field_types, + self._input_field_defaults) + if x not in self._label_fields + ])) + # pic_path + record_defaults.append('') + # group_size + record_defaults.append(np.int32(0)) + + selected_cols = self._data_config.selected_cols \ + if self._data_config.selected_cols else None + + if self._data_config.pai_worker_queue and \ + mode == tf.estimator.ModeKeys.TRAIN: + logging.info('pai_worker_slice_num = %d' % + self._data_config.pai_worker_slice_num) + work_queue = pai.data.WorkQueue( + self._input_path, + num_epochs=self.num_epochs, + shuffle=self._data_config.shuffle, + num_slices=self._data_config.pai_worker_slice_num * self._task_num) + que_paths = work_queue.input_dataset() + dataset = tf.data.TableRecordDataset( + que_paths, + record_defaults=record_defaults, + selected_cols=selected_cols) + else: + dataset = tf.data.TableRecordDataset( + self._input_path, + record_defaults=record_defaults, + selected_cols=selected_cols, + slice_id=self._task_index, + slice_count=self._task_num) + + if mode == tf.estimator.ModeKeys.TRAIN: + if self._data_config.shuffle: + dataset = dataset.shuffle( + self._data_config.shuffle_buffer_size, + seed=2020, + reshuffle_each_iteration=True) + dataset = dataset.repeat(self.num_epochs) + else: + dataset = dataset.repeat(1) + + dataset = dataset.batch(batch_size=self._data_config.batch_size) + + dataset = dataset.map( + self._parse_table, + 
num_parallel_calls=self._data_config.num_parallel_calls) + + # preprocess is necessary to transform data + # so that they could be feed into FeatureColumns + dataset = dataset.map( + map_func=self._preprocess, + num_parallel_calls=self._data_config.num_parallel_calls) + + dataset = dataset.prefetch(buffer_size=self._prefetch_size) + + if mode != tf.estimator.ModeKeys.PREDICT: + dataset = dataset.map(lambda x: + (self._get_features(x), self._get_labels(x))) + else: + dataset = dataset.map(lambda x: (self._get_features(x))) + return dataset diff --git a/easy_rec/python/layers/common_layers.py b/easy_rec/python/layers/common_layers.py index 883f2a67c..c668541a1 100644 --- a/easy_rec/python/layers/common_layers.py +++ b/easy_rec/python/layers/common_layers.py @@ -35,7 +35,8 @@ def highway(x, if size is None: size = x.shape.as_list()[-1] else: - x = tf.layers.dense(x, size, name='input_projection', reuse=reuse) + x = tf.layers.dense( + x, size, activation=activation, name='input_projection', reuse=reuse) for i in range(num_layers): T = tf.layers.dense( diff --git a/easy_rec/python/model/e2e_mm_dbmtl.py b/easy_rec/python/model/e2e_mm_dbmtl.py new file mode 100644 index 000000000..f810dd8fd --- /dev/null +++ b/easy_rec/python/model/e2e_mm_dbmtl.py @@ -0,0 +1,239 @@ +# -*- encoding:utf-8 -*- +# Copyright (c) Alibaba, Inc. and its affiliates. +import logging + +import tensorflow as tf +from tensorflow.keras.applications.resnet50 import ResNet50 + +from easy_rec.python.layers import dnn +from easy_rec.python.layers import mmoe +from easy_rec.python.layers.common_layers import highway +from easy_rec.python.model.multi_task_model import MultiTaskModel + +from easy_rec.python.protos.dbmtl_pb2 import E2E_MM_DBMTL as E2E_MM_DBMTL_Config # NOQA + +if tf.__version__ >= '2.0': + tf = tf.compat.v1 + + +class E2E_MM_DBMTL(MultiTaskModel): + + def __init__(self, + model_config, + feature_configs, + features, + labels=None, + is_training=False): + super(E2E_MM_DBMTL, self).__init__(model_config, feature_configs, features, + labels, is_training) + assert self._model_config.WhichOneof('model') == 'e2e_mm_dbmtl', \ + 'invalid model config: %s' % self._model_config.WhichOneof('model') + self._model_config = self._model_config.e2e_mm_dbmtl + assert isinstance(self._model_config, E2E_MM_DBMTL_Config) + + self._features, _ = self._input_layer(self._feature_dict, 'all') + + if 'img' in self._input_layer._feature_groups: + self._img_features, _ = self._input_layer(self._feature_dict, 'img') + + for feature_config in feature_configs: + if feature_config.feature_type == feature_config.ImgFeature: + assert feature_config.HasField( + 'img_shape'), 'ImgFeature must set img_shape.' 
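+        # Note: ImgShape defaults to width=224, height=224, channel=3 (see
+        # feature_config.proto below), which matches the 224x224x3 zero-image
+        # fallback used by _load_img in easy_rec/python/input/input.py.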
+ self.img_width = feature_config.img_shape.width + self.img_height = feature_config.img_shape.height + self.img_channel = feature_config.img_shape.channel + break + + if 'sample_num' in self._input_layer._feature_groups: + self._sample_idx_fea, _ = self._input_layer(self._feature_dict, + 'sample_num') + + if 'img_emb' in self._input_layer._feature_groups: + self._img_emb, _ = self._input_layer(self._feature_dict, 'img_emb') + self._init_towers(self._model_config.task_towers) + + def tune_img_emb(self, img_emb): + with tf.device('/CPU:0'): + if self._model_config.HasField('highway_dnn'): + emb_size = self._model_config.highway_dnn.emb_size + logging.info('highway_dnn used in img_emb, and emb_size is %s' % + emb_size) + img_emb = highway( + img_emb, emb_size, activation=tf.nn.relu, scope='highway_dnn') + + elif self._model_config.HasField('img_dnn'): + logging.info('img_dnn used in img_emb') + img_dnn = dnn.DNN( + self._model_config.img_dnn, + self._l2_reg, + name='img_dnn', + is_training=self._is_training) + img_emb = img_dnn(img_emb) + else: + logging.info('not using img_dnn and highway_dnn in img_emb') + return img_emb + + def img_net(self, img_feature): + with tf.device('/CPU:0'): + img_feature = tf.reshape( + img_feature, (-1, self.img_width, self.img_height, self.img_channel)) + # if self._model_config.img_model.model_name == 'ResNet': + # from easy_vision.python.core.backbones.nets.resnet_v1 import resnet_v1a_18 + # img_logit = resnet_v1a_18( + # img_feature, num_classes=self._model_config.img_model.num_classes, is_training=self._is_training)[0] + # elif self._model_config.img_model.model_name == 'MobileNet': + # from easy_vision.python.core.backbones.nets.mobilenet.mobilenet_v3 import mobilenet + # img_logit = mobilenet( + # img_feature, num_classes=self._model_config.img_model.num_classes, is_training=self._is_training)[0] + # else: + # assert False, "img_model must in [ResNet, MobileNet]" + + img_model = ResNet50( + include_top=True, + pooling='max', + classes=self._model_config.img_model.num_classes, + weights=None) + img_logit = img_model(img_feature) + + img_emb = self.tune_img_emb(img_logit) + self._prediction_dict['img_logits_test'] = img_emb + if 'sample_num' in self._input_layer._feature_groups: + # 扩展 img_emb, img_logits + sample_idx_fea = tf.cast(self._sample_idx_fea, tf.int32) + img_emb_expanded = tf.gather(img_emb, sample_idx_fea) + img_emb_expanded = tf.squeeze(img_emb_expanded, axis=1) + + img_logit_expanded = tf.gather(img_logit, sample_idx_fea) + img_logit_expanded = tf.squeeze(img_logit_expanded, axis=1) + self._prediction_dict['img_logits'] = img_logit_expanded + return img_emb_expanded + else: + return img_emb + + def build_predict_graph(self): + with tf.device('/CPU:0'): + base_features = tf.layers.batch_normalization( + self._features, + training=self._is_training, + trainable=True, + name='base_feature_emb_bn') + + if self._model_config.HasField('bottom_dnn'): + bottom_dnn = dnn.DNN( + self._model_config.bottom_dnn, + self._l2_reg, + name='bottom_dnn', + is_training=self._is_training) + bottom_fea = bottom_dnn(base_features) + else: + bottom_fea = base_features + + # for train + if 'img' in self._input_layer._feature_groups: + img_emb = self.img_net(self._img_features) + bottom_fea = tf.concat([img_emb, bottom_fea], axis=-1) + self._prediction_dict['img_emb'] = tf.reduce_join( + tf.as_string(img_emb), axis=-1, separator=',') + # for predict + elif 'img_emb' in self._input_layer._feature_groups: + bottom_fea = tf.concat([self._img_emb, bottom_fea], axis=-1) + + 
with tf.device('/CPU:0'): + # MMOE block + if self._model_config.HasField('expert_dnn'): + mmoe_layer = mmoe.MMOE( + self._model_config.expert_dnn, + l2_reg=self._l2_reg, + num_task=self._task_num, + num_expert=self._model_config.num_expert) + task_input_list = mmoe_layer(bottom_fea) + else: + task_input_list = [bottom_fea] * self._task_num + + tower_features = {} + # task specify network + for i, task_tower_cfg in enumerate(self._model_config.task_towers): + tower_name = task_tower_cfg.tower_name + if task_tower_cfg.HasField('dnn'): + tower_dnn = dnn.DNN( + task_tower_cfg.dnn, + self._l2_reg, + name=tower_name + '/dnn', + is_training=self._is_training) + tower_fea = tower_dnn(task_input_list[i]) + tower_features[tower_name] = tower_fea + else: + tower_features[tower_name] = task_input_list[i] + + tower_outputs = {} + relation_features = {} + # bayes network + for task_tower_cfg in self._model_config.task_towers: + tower_name = task_tower_cfg.tower_name + relation_dnn = dnn.DNN( + task_tower_cfg.relation_dnn, + self._l2_reg, + name=tower_name + '/relation_dnn', + is_training=self._is_training) + tower_inputs = [tower_features[tower_name]] + for relation_tower_name in task_tower_cfg.relation_tower_names: + tower_inputs.append(relation_features[relation_tower_name]) + relation_input = tf.concat( + tower_inputs, axis=-1, name=tower_name + '/relation_input') + relation_fea = relation_dnn(relation_input) + relation_features[tower_name] = relation_fea + + output_logits = tf.layers.dense( + relation_fea, + task_tower_cfg.num_class, + kernel_regularizer=self._l2_reg, + name=tower_name + '/output') + tower_outputs[tower_name] = output_logits + + self._add_to_prediction_dict(tower_outputs) + return self._prediction_dict + + def build_loss_graph(self): + """Build loss graph for multi task model.""" + for task_tower_cfg in self._task_towers: + tower_name = task_tower_cfg.tower_name + loss_weight = task_tower_cfg.weight * self._sample_weight + + if hasattr(task_tower_cfg, 'task_space_indicator_label') and \ + task_tower_cfg.HasField('task_space_indicator_label'): + in_task_space = tf.to_float( + self._labels[task_tower_cfg.task_space_indicator_label] > 0) + loss_weight = loss_weight * ( + task_tower_cfg.in_task_space_weight * in_task_space + + task_tower_cfg.out_task_space_weight * (1 - in_task_space)) + + self._loss_dict.update( + self._build_loss_impl( + task_tower_cfg.loss_type, + label_name=self._label_name_dict[tower_name], + loss_weight=loss_weight, + num_class=task_tower_cfg.num_class, + suffix='_%s' % tower_name)) + + if self._model_config.img_model.img_loss_weight: + label = tf.cast(self._labels['cate_label'], tf.int32) + img_loss = tf.losses.sparse_softmax_cross_entropy( + labels=label, logits=self._prediction_dict['img_logits']) + self._loss_dict[ + 'weighted_img_loss'] = img_loss * self._model_config.img_model.img_loss_weight + return self._loss_dict + + def get_outputs(self): + outputs = [] + for task_tower_cfg in self._task_towers: + tower_name = task_tower_cfg.tower_name + outputs.extend( + self._get_outputs_impl( + task_tower_cfg.loss_type, + task_tower_cfg.num_class, + suffix='_%s' % tower_name)) + if 'img' in self._input_layer._feature_groups: + outputs.append('img_emb') + outputs.append('img_logits_test') + return outputs diff --git a/easy_rec/python/protos/dataset.proto b/easy_rec/python/protos/dataset.proto index 2710d2d91..c4b8922c7 100644 --- a/easy_rec/python/protos/dataset.proto +++ b/easy_rec/python/protos/dataset.proto @@ -177,6 +177,10 @@ message DatasetConfig { // input 
pipelines DummyInput = 8; KafkaInput = 13; + // for e2e multimodal input on local or ds + GroupRTPInput = 20; + // for e2e multimodal input on odps + OdpsGroupRTPInput = 21; } required InputType input_type = 10; @@ -211,6 +215,9 @@ message DatasetConfig { // for RTPInput only optional string rtp_separator = 17 [default = ';']; + // for GroupRTPInput/OdpsGroupRTPInput, it is usually set to '\001' + // CTRL+A could be set as '\001' + optional string group_sample_separator = 26 [default = ',']; // ignore some data errors // it is not suggested to set this parameter diff --git a/easy_rec/python/protos/dbmtl.proto b/easy_rec/python/protos/dbmtl.proto index 57a7733b2..6675a6078 100644 --- a/easy_rec/python/protos/dbmtl.proto +++ b/easy_rec/python/protos/dbmtl.proto @@ -3,6 +3,7 @@ package protos; import "easy_rec/python/protos/dnn.proto"; import "easy_rec/python/protos/tower.proto"; +import "easy_rec/python/protos/layer.proto"; message DBMTL { // shared bottom dnn layer @@ -16,3 +17,26 @@ message DBMTL { // l2 regularization optional float l2_regularization = 5 [default = 1e-4]; } + +message ImgModel { + required string model_name = 1 [default = 'ResNet']; + optional uint32 num_classes = 2; + optional float img_loss_weight = 3; +}; + +message E2E_MM_DBMTL { + // shared bottom dnn layer + optional DNN bottom_dnn = 1; + // mmoe expert dnn layer definition + optional DNN expert_dnn = 2; + // number of mmoe experts + optional uint32 num_expert = 3 [default=0]; + // bayes task tower + repeated BayesTaskTower task_towers = 4; + // l2 regularization + optional float l2_regularization = 5 [default = 1e-4]; + + optional DNN img_dnn = 6; + optional HighWayTower highway_dnn = 7; + optional ImgModel img_model = 8; +} diff --git a/easy_rec/python/protos/easy_rec_model.proto b/easy_rec/python/protos/easy_rec_model.proto index 6f8ca590d..f636848fc 100644 --- a/easy_rec/python/protos/easy_rec_model.proto +++ b/easy_rec/python/protos/easy_rec_model.proto @@ -73,6 +73,8 @@ message EasyRecModel { PLE ple = 305; RocketLaunching rocket_launching = 401; + + E2E_MM_DBMTL e2e_mm_dbmtl = 501; } repeated SeqAttGroupConfig seq_att_groups = 7; // implemented in easy_rec/python/model/easy_rec_estimator diff --git a/easy_rec/python/protos/export.proto b/easy_rec/python/protos/export.proto index b5b419118..34a18b264 100644 --- a/easy_rec/python/protos/export.proto +++ b/easy_rec/python/protos/export.proto @@ -1,5 +1,6 @@ syntax = "proto2"; package protos; +import "easy_rec/python/protos/feature_config.proto"; message MultiValueFields { repeated string input_name = 1; @@ -47,4 +48,8 @@ message ExportConfig { // filter out inputs, only keep effective ones optional bool filter_inputs = 12 [default = true]; + + // only for e2e_mm_dbmtl + optional string img_input_name = 13; + optional ImgShape img_shape = 14; } diff --git a/easy_rec/python/protos/feature_config.proto b/easy_rec/python/protos/feature_config.proto index 18ef12ea1..98330e62d 100644 --- a/easy_rec/python/protos/feature_config.proto +++ b/easy_rec/python/protos/feature_config.proto @@ -28,6 +28,12 @@ message SequenceCombiner { } } +message ImgShape { + optional uint32 width = 1 [default = 224]; + optional uint32 height = 2 [default = 224]; + optional uint32 channel = 3 [default = 3]; +} + message FeatureConfig { enum FeatureType { IdFeature = 0; @@ -36,6 +42,8 @@ message FeatureConfig { ComboFeature = 3; LookupFeature = 4; SequenceFeature = 5; + ImgFeature = 6; + SampleNumFeature = 7; } optional string feature_name = 1; @@ -94,6 +102,9 @@ message FeatureConfig { // 
sequence feature combiner
   optional SequenceCombiner sequence_combiner = 25;
+
+  // only for ImgFeature
+  optional ImgShape img_shape = 26;
 }
 
 message FeatureConfigV2 {
diff --git a/easy_rec/python/test/odps_run.py b/easy_rec/python/test/odps_run.py
index 4b6179fbc..443db118b 100644
--- a/easy_rec/python/test/odps_run.py
+++ b/easy_rec/python/test/odps_run.py
@@ -196,12 +196,8 @@ def test_boundary_test(self):
     tot.drop_table()
 
   def test_vector_retrieve(self):
-    start_files = [
-        'vector_retrieve/create_inner_vector_table.sql'
-    ]
-    test_files = [
-        'vector_retrieve/run_vector_retrieve.sql'
-    ]
+    start_files = ['vector_retrieve/create_inner_vector_table.sql']
+    test_files = ['vector_retrieve/run_vector_retrieve.sql']
     end_file = ['vector_retrieve/drop_table.sql']
     tot = OdpsTest(start_files, test_files, end_file, odps_oss_config)
     tot.start_test()
diff --git a/easy_rec/python/test/train_eval_test.py b/easy_rec/python/test/train_eval_test.py
index a3bc6fc92..80e03015d 100644
--- a/easy_rec/python/test/train_eval_test.py
+++ b/easy_rec/python/test/train_eval_test.py
@@ -595,6 +595,11 @@ def test_multi_optimizer(self):
         'samples/model_config/wide_and_deep_two_opti.config', self._test_dir)
     self.assertTrue(self._success)
 
+  def test_e2e_mm_dbmtl(self):
+    self._success = test_utils.test_single_train_eval(
+        'samples/model_config/taobao_fg_e2e_mm_dbmtl.config', self._test_dir)
+    self.assertTrue(self._success)
+
 
 if __name__ == '__main__':
   tf.test.main()
diff --git a/easy_rec/python/tools/export_mm_model.py b/easy_rec/python/tools/export_mm_model.py
new file mode 100644
index 000000000..ed2afed5d
--- /dev/null
+++ b/easy_rec/python/tools/export_mm_model.py
@@ -0,0 +1,202 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+import logging
+import os
+
+import tensorflow as tf
+from tensorflow.python.lib.io import file_io
+from tensorflow.python.saved_model import signature_constants
+
+from easy_rec.python.main import export
+from easy_rec.python.utils import config_util
+
+FLAGS = tf.app.flags.FLAGS
+tf.app.flags.DEFINE_string('model_dir', '', '')
+tf.app.flags.DEFINE_string('pipeline_config_path', None, '')
+tf.app.flags.DEFINE_string('checkpoint_path', '', 'checkpoint to be exported')
+tf.app.flags.DEFINE_string('rec_model_export_dir', None,
+                           'directory where rec model should be exported to')
+tf.app.flags.DEFINE_string('total_model_export_dir', None,
+                           'directory where total model should be exported to')
+tf.app.flags.DEFINE_string('img_model_export_dir', None,
+                           'directory where img model should be exported to')
+tf.app.flags.DEFINE_string('asset_files', '', 'more files to add to asset')
+logging.basicConfig(
+    level=logging.INFO, format='[%(asctime)s][%(levelname)s] %(message)s')
+
+
+def search_pb(directory):
+  dir_list = []
+  for root, dirs, files in tf.gfile.Walk(directory):
+    for f in files:
+      _, ext = os.path.splitext(f)
+      if ext == '.pb':
+        dir_list.append(root)
+  if len(dir_list) == 0:
+    raise ValueError('no saved model found in directory %s' % directory)
+  elif len(dir_list) > 1:
+    raise ValueError('multiple saved models found in directory %s' % directory)
+
+  return dir_list[0]
+
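+
+# cut_model extracts the standalone image model from the total saved model:
+# it loads the exported SavedModel, looks up the image-path input and the
+# 'img_emb' output in the default serving signature, rebuilds a prediction
+# signature over just that pair, and saves it as a separate SavedModel that
+# can be deployed for image inference on its own.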
+def cut_model(input_savemodel_path, output_img_savemodel_path,
+              img_path_fea_name):
+  model_dir = search_pb(input_savemodel_path)
+
+  graph = tf.Graph()
+  session_config = tf.ConfigProto()
+  session_config.log_device_placement = True
+  session_config.allow_soft_placement = True
+  session_config.intra_op_parallelism_threads = 10
+  session_config.inter_op_parallelism_threads = 10
+  session_config.gpu_options.allow_growth = True
+
+  with tf.Session(config=session_config, graph=graph) as sess:
+
+    def device_func(o):
+      return '/device:CPU:0'
+
+    with tf.device(device_func):
+      meta_graph_def = tf.saved_model.loader.load(
+          sess, [tf.saved_model.tag_constants.SERVING], model_dir)
+
+      input_map_names = {}
+      output_map_names = {}
+
+      signature_def = meta_graph_def.signature_def[
+          signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
+      inputs = signature_def.inputs
+      for name, tensor in inputs.items():
+        if name in [img_path_fea_name]:
+          tensor = graph.get_tensor_by_name(tensor.name)
+          input_map_names[name] = tensor
+
+      outputs = signature_def.outputs
+      for name, tensor in outputs.items():
+        if name in ['img_emb']:
+          tensor = graph.get_tensor_by_name(tensor.name)
+          output_map_names[name] = tensor
+
+      inputs = {}
+      for k, v in input_map_names.items():
+        inputs[k] = tf.saved_model.utils.build_tensor_info(v)
+      outputs = {}
+      for k, v in output_map_names.items():
+        outputs[k] = tf.saved_model.utils.build_tensor_info(v)
+
+      prediction_signature = (
+          tf.saved_model.signature_def_utils.build_signature_def(
+              inputs=inputs,
+              outputs=outputs,
+              method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME))
+
+      builder = tf.saved_model.builder.SavedModelBuilder(
+          output_img_savemodel_path)
+      builder.add_meta_graph_and_variables(
+          sess, [tf.saved_model.tag_constants.SERVING],
+          signature_def_map={
+              signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
+                  prediction_signature,
+          })
+      builder.save()
+
+
+def main(argv):
+
+  assert FLAGS.model_dir or FLAGS.pipeline_config_path, 'At least one of --model_dir and --pipeline_config_path must be set.'
+  if FLAGS.model_dir:
+    pipeline_config_path = os.path.join(FLAGS.model_dir, 'pipeline.config')
+    if file_io.file_exists(pipeline_config_path):
+      logging.info('update pipeline_config_path to %s' % pipeline_config_path)
+    else:
+      pipeline_config_path = FLAGS.pipeline_config_path
+  else:
+    pipeline_config_path = FLAGS.pipeline_config_path
+
+  pipeline_config = config_util.get_configs_from_pipeline_file(
+      pipeline_config_path)
+  feature_configs = config_util.get_compatible_feature_configs(pipeline_config)
+  total_model_pipeline_filename = 'pipeline_total_model.config'
+  total_model_pipeline_path = os.path.join(FLAGS.model_dir,
+                                           total_model_pipeline_filename)
+  rec_model_pipeline_filename = 'pipeline_rec_model.config'
+  rec_model_pipeline_path = os.path.join(FLAGS.model_dir,
+                                         rec_model_pipeline_filename)
+
+  # step 1: modify config for total model
+  drop_idx = None
+  sample_num_fea_name = None
+  for idx, fea_config in enumerate(feature_configs):
+    if fea_config.feature_type == fea_config.SampleNumFeature:
+      sample_num_fea_name = fea_config.input_names[0]
+      drop_idx = idx
+      break
+  assert sample_num_fea_name is not None, 'SampleNumFeature has not been set in %s.' % pipeline_config_path
+  del feature_configs[drop_idx]
+
+  drop_idx = None
+  for idx, input_config in enumerate(pipeline_config.data_config.input_fields):
+    if input_config.input_name == sample_num_fea_name:
+      drop_idx = idx
+      break
+  del pipeline_config.data_config.input_fields[drop_idx]
+
+  drop_idx = None
+  for idx, fea_group_config in enumerate(
+      pipeline_config.model_config.feature_groups):
+    if len(fea_group_config.feature_names
+          ) == 1 and fea_group_config.feature_names[0] == sample_num_fea_name:
+      drop_idx = idx
+      break
+  del pipeline_config.model_config.feature_groups[drop_idx]
+
+  config_util.save_pipeline_config(
+      pipeline_config, FLAGS.model_dir, filename=total_model_pipeline_filename)
+
+  # step 2: export total model
+  export(FLAGS.total_model_export_dir, total_model_pipeline_path,
+         FLAGS.checkpoint_path, FLAGS.asset_files)
+
+  # step 3: modify config for rec model
+  model_config = pipeline_config.model_config.e2e_mm_dbmtl
+  if model_config.HasField('highway_dnn'):
+    emb_size = model_config.highway_dnn.emb_size
+  elif model_config.HasField('img_dnn'):
+    emb_size = model_config.img_dnn.hidden_units[-1]
+  else:
+    emb_size = model_config.img_model.num_classes
+
+  img_path_fea_name = None
+  for idx, fea_config in enumerate(feature_configs):
+    if fea_config.feature_type == fea_config.ImgFeature:
+      img_path_fea_name = fea_config.input_names[0]
+      fea_config.input_names[0] = 'img_emb'
+      fea_config.feature_type = fea_config.RawFeature
+      fea_config.separator = ','
+      fea_config.raw_input_dim = emb_size
+
+  assert img_path_fea_name is not None, 'ImgFeature has not been set in %s.' % pipeline_config_path
+
+  for idx, input_config in enumerate(pipeline_config.data_config.input_fields):
+    if input_config.input_name == img_path_fea_name:
+      input_config.input_name = 'img_emb'
+      break
+
+  for idx, fea_group_config in enumerate(
+      pipeline_config.model_config.feature_groups):
+    if fea_group_config.group_name == 'img':
+      fea_group_config.group_name = 'img_emb'
+      fea_group_config.feature_names[0] = 'img_emb'
+  config_util.save_pipeline_config(
+      pipeline_config, FLAGS.model_dir, filename=rec_model_pipeline_filename)
+
+  # step 4: export rec model
+  export(FLAGS.rec_model_export_dir, rec_model_pipeline_path,
+         FLAGS.checkpoint_path, FLAGS.asset_files)
+
+  # step 5: cut the img model out of the total model
+  cut_model(FLAGS.total_model_export_dir, FLAGS.img_model_export_dir,
+            img_path_fea_name)
+
+
+if __name__ == '__main__':
+  tf.app.run()
diff --git a/easy_rec/version.py b/easy_rec/version.py
index 4dbed0c5a..be9f8f6ff 100644
--- a/easy_rec/version.py
+++ b/easy_rec/version.py
@@ -1,3 +1,3 @@
 # -*- encoding:utf-8 -*-
 # Copyright (c) Alibaba, Inc. and its affiliates.
-__version__ = '0.3.1' +__version__ = '0.3.2' diff --git a/samples/model_config/taobao_fg_e2e_mm_dbmtl.config b/samples/model_config/taobao_fg_e2e_mm_dbmtl.config new file mode 100644 index 000000000..11902eb21 --- /dev/null +++ b/samples/model_config/taobao_fg_e2e_mm_dbmtl.config @@ -0,0 +1,315 @@ +train_input_path: "data/test/rtp/taobao_train_feature_for_e2e_mm_dbmtl.txt" +eval_input_path: "data/test/rtp/taobao_test_feature_for_e2e_mm_dbmtl.txt" +model_dir: "experiments/taobao_e2e_mm_dbmtl_demo" + +train_config { + optimizer_config { + use_moving_average: false + adam_optimizer { + learning_rate { + exponential_decay_learning_rate { + initial_learning_rate: 0.0001 + decay_steps: 100000 + decay_factor: 0.5 + min_learning_rate: 1e-07 + } + } + } + } + num_steps: 20 + sync_replicas: true + save_summary_steps: 10 + log_step_count_steps: 10 + save_checkpoints_steps: 10 +} + +data_config { + batch_size: 16 + label_fields: "clk" + input_type: GroupRTPInput + separator: "" + group_sample_separator: "" + rtp_separator: ";" + selected_cols: "0,3,4,5" + input_fields { + input_name: "clk" + input_type: INT32 + default_val: "0" + } + input_fields { + input_name: "user_id" + } + input_fields { + input_name: "cms_segid" + } + input_fields { + input_name: "cms_group_id" + } + input_fields { + input_name: "age_level" + } + input_fields { + input_name: "pvalue_level" + } + input_fields { + input_name: "shopping_level" + } + input_fields { + input_name: "occupation" + } + input_fields { + input_name: "new_user_class_level" + } + input_fields { + input_name: "adgroup_id" + } + input_fields { + input_name: "cate_id" + } + input_fields { + input_name: "campaign_id" + } + input_fields { + input_name: "customer" + } + input_fields { + input_name: "brand" + } + input_fields { + input_name: "price" + input_type: DOUBLE + default_val: "0.0" + } + input_fields { + input_name: "pid" + } + input_fields { + input_name: "user_tag_cate" + } + input_fields { + input_name: "combo_brand" + } + input_fields { + input_name: "combo_cate_id" + } + input_fields { + input_name: "img_path" + } + input_fields { + input_name: "group_size" + input_type: INT32 + } + +} +feature_config: { + features { + input_names: "user_id" + feature_type: TagFeature + embedding_dim: 16 + hash_bucket_size: 100000 + max_partitions: 4 + separator: "" + } + features { + input_names: "cms_segid" + feature_type: TagFeature + embedding_dim: 16 + hash_bucket_size: 100 + separator: "" + } + features { + input_names: "cms_group_id" + feature_type: TagFeature + embedding_dim: 16 + hash_bucket_size: 100 + separator: "" + } + features { + input_names: "age_level" + feature_type: TagFeature + embedding_dim: 16 + hash_bucket_size: 10 + separator: "" + } + features { + input_names: "pvalue_level" + feature_type: TagFeature + embedding_dim: 16 + hash_bucket_size: 10 + separator: "" + } + features { + input_names: "shopping_level" + feature_type: TagFeature + embedding_dim: 16 + hash_bucket_size: 10 + separator: "" + } + features { + input_names: "occupation" + feature_type: TagFeature + embedding_dim: 16 + hash_bucket_size: 10 + separator: "" + } + features { + input_names: "new_user_class_level" + feature_type: TagFeature + embedding_dim: 16 + hash_bucket_size: 10 + separator: "" + } + features { + input_names: "adgroup_id" + feature_type: TagFeature + embedding_dim: 16 + hash_bucket_size: 100000 + separator: "" + } + features { + input_names: "cate_id" + feature_type: TagFeature + embedding_dim: 16 + hash_bucket_size: 100000 + separator: "" + } + features { + 
input_names: "campaign_id" + feature_type: TagFeature + embedding_dim: 16 + hash_bucket_size: 100000 + separator: "" + } + features { + input_names: "customer" + feature_type: TagFeature + embedding_dim: 16 + hash_bucket_size: 100000 + separator: "" + } + features { + input_names: "brand" + feature_type: TagFeature + embedding_dim: 16 + hash_bucket_size: 100000 + separator: "" + } + features { + input_names: "price" + feature_type: RawFeature + separator: "" + } + features { + input_names: "pid" + feature_type: TagFeature + embedding_dim: 16 + hash_bucket_size: 100000 + separator: "" + } + features { + input_names: "user_tag_cate" + feature_type: TagFeature + embedding_dim: 16 + hash_bucket_size: 100000 + separator: "" + } + features { + input_names: "combo_brand" + feature_type: TagFeature + embedding_dim: 16 + hash_bucket_size: 100000 + separator: "" + } + features { + input_names: "combo_cate_id" + feature_type: TagFeature + embedding_dim: 16 + hash_bucket_size: 10000 + separator: "" + } + features { + input_names: "img_path" + feature_type: ImgFeature + img_shape { + width: 224 + height: 224 + channel: 3 + } + } + features { + input_names: "group_size" + feature_type: SampleNumFeature + }t +} +model_config { + model_class: "E2E_MM_DBMTL" + feature_groups { + group_name: "img" + feature_names: "img_path" + } + feature_groups { + group_name: "sample_num" + feature_names: "group_size" + } + feature_groups { + group_name: "all" + feature_names: "adgroup_id" + feature_names: "cate_id" + feature_names: "campaign_id" + feature_names: "customer" + feature_names: "brand" + feature_names: "price" + feature_names: "pid" + feature_names: "user_id" + feature_names: "cms_segid" + feature_names: "cms_group_id" + feature_names: "age_level" + feature_names: "pvalue_level" + feature_names: "shopping_level" + feature_names: "occupation" + feature_names: "new_user_class_level" + feature_names: "user_tag_cate" + feature_names: "combo_brand" + feature_names: "combo_cate_id" + wide_deep: DEEP + } + embedding_regularization: 1e-05 + e2e_mm_dbmtl { + highway_dnn { + input: "img" + emb_size: 16 + } + bottom_dnn { + hidden_units: 1024 + hidden_units: 512 + dropout_ratio: 0.3 + dropout_ratio: 0.3 + use_bn: false + } + task_towers { + tower_name: "clk" + label_name: "clk" + metrics_set { + auc { + } + } + loss_type: CLASSIFICATION + dnn { + hidden_units: 256 + hidden_units: 128 + hidden_units: 64 + hidden_units: 32 + dropout_ratio: 0.2 + dropout_ratio: 0.2 + dropout_ratio: 0.1 + dropout_ratio: 0.1 + use_bn: false + } + relation_dnn { + hidden_units: 16 + use_bn: false + } + weight: 1.0 + } + } +} \ No newline at end of file diff --git a/samples/odps_script/vector_retrieve/drop_table.sql b/samples/odps_script/vector_retrieve/drop_table.sql index 3550efc6b..7d7f03062 100644 --- a/samples/odps_script/vector_retrieve/drop_table.sql +++ b/samples/odps_script/vector_retrieve/drop_table.sql @@ -1,3 +1,3 @@ drop TABLE IF EXISTS query_vector_{TIME_STAMP}; drop TABLE IF EXISTS doc_vector_{TIME_STAMP}; -drop TABLE IF EXISTS result_vector_{TIME_STAMP}; \ No newline at end of file +drop TABLE IF EXISTS result_vector_{TIME_STAMP}; diff --git a/samples/odps_script/vector_retrieve/run_vector_retrieve.sql b/samples/odps_script/vector_retrieve/run_vector_retrieve.sql index 2314a3eea..2f4559c54 100644 --- a/samples/odps_script/vector_retrieve/run_vector_retrieve.sql +++ b/samples/odps_script/vector_retrieve/run_vector_retrieve.sql @@ -13,4 +13,4 @@ pai -name easy_rec_ext -Dknn_feature_dims=4 -Dknn_index_type='ivfflat' 
-Dknn_feature_delimiter=',' -; \ No newline at end of file +; diff --git a/setup.cfg b/setup.cfg index fa156e1ee..663aa72c4 100644 --- a/setup.cfg +++ b/setup.cfg @@ -10,7 +10,7 @@ multi_line_output = 7 force_single_line = true known_standard_library = setuptools known_first_party = easy_rec -known_third_party = absl,common_io,future,google,matplotlib,numpy,oss2,pai,pandas,psutil,six,sklearn,sphinx_markdown_tables,sphinx_rtd_theme,tensorflow,yaml +known_third_party = absl,common_io,cv2,future,google,matplotlib,numpy,oss2,pai,pandas,psutil,six,sklearn,sphinx_markdown_tables,sphinx_rtd_theme,tensorflow,yaml no_lines_before = LOCALFOLDER default_section = THIRDPARTY skip = easy_rec/python/protos From 9a12adc76746e7f2d84e81e2f0f0ce06cc582582 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=99=93=E6=82=A6?= Date: Mon, 28 Feb 2022 19:13:52 +0800 Subject: [PATCH 2/9] add requirements --- requirements/runtime.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements/runtime.txt b/requirements/runtime.txt index d3bc757fc..8984cb2cb 100644 --- a/requirements/runtime.txt +++ b/requirements/runtime.txt @@ -8,3 +8,4 @@ xlrd >= 0.9.0 # pydatahub # cprotobuf is required by pydatahub # cprotobuf==0.1.9 +opencv-python \ No newline at end of file From 1e3e0d4fa07c168ee05abe3c17b32c7348f7739b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=99=93=E6=82=A6?= Date: Mon, 28 Feb 2022 19:48:50 +0800 Subject: [PATCH 3/9] add requirements --- requirements/runtime.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/runtime.txt b/requirements/runtime.txt index 8984cb2cb..52611dd70 100644 --- a/requirements/runtime.txt +++ b/requirements/runtime.txt @@ -8,4 +8,4 @@ xlrd >= 0.9.0 # pydatahub # cprotobuf is required by pydatahub # cprotobuf==0.1.9 -opencv-python \ No newline at end of file +opencv-python-headless \ No newline at end of file From bd85372646a96ee889f1cfb0a4fdd20c5a24f421 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=99=93=E6=82=A6?= Date: Mon, 28 Feb 2022 19:50:31 +0800 Subject: [PATCH 4/9] add requirements --- requirements/runtime.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/runtime.txt b/requirements/runtime.txt index 52611dd70..8b81d921a 100644 --- a/requirements/runtime.txt +++ b/requirements/runtime.txt @@ -8,4 +8,4 @@ xlrd >= 0.9.0 # pydatahub # cprotobuf is required by pydatahub # cprotobuf==0.1.9 -opencv-python-headless \ No newline at end of file +opencv-python-headless==4.2.0.32 \ No newline at end of file From ec56cb7dbc4d6faa9592bfde5b1681432abcb966 Mon Sep 17 00:00:00 2001 From: weisu Date: Fri, 4 Mar 2022 17:28:18 +0800 Subject: [PATCH 5/9] [feat]: add type convert force for conv1d --- easy_rec/python/layers/common_layers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/easy_rec/python/layers/common_layers.py b/easy_rec/python/layers/common_layers.py index 883f2a67c..80ad1496f 100644 --- a/easy_rec/python/layers/common_layers.py +++ b/easy_rec/python/layers/common_layers.py @@ -65,8 +65,8 @@ def text_cnn(x, # conv shape: (batch_size, seq_len - filter_size + 1, num_filters) conv = tf.layers.conv1d( x, - filters=num_filter, - kernel_size=filter_size, + filters=int(num_filter), + kernel_size=int(filter_size), activation=tf.nn.relu, name='conv_layer', reuse=reuse, From 5a87dcb3d3bb01506fba35d2191b38458e054029 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=99=93=E6=82=A6?= Date: Mon, 7 Mar 2022 15:16:18 +0800 Subject: [PATCH 6/9] update docs --- .../models/end-to-end_multimodal_dbmtl.md | 10 ++++++++++ 
 easy_rec/python/model/e2e_mm_dbmtl.py          |  2 --
 .../model_config/taobao_fg_e2e_mm_dbmtl.config | 18 ++++++++++++++++--
 3 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/docs/source/models/end-to-end_multimodal_dbmtl.md b/docs/source/models/end-to-end_multimodal_dbmtl.md
index f3a5aff9c..8f81596ad 100644
--- a/docs/source/models/end-to-end_multimodal_dbmtl.md
+++ b/docs/source/models/end-to-end_multimodal_dbmtl.md
@@ -84,6 +84,10 @@ model_config {
   }
   embedding_regularization: 1e-05
   e2e_mm_dbmtl {
+    img_model {
+      model_name: "ResNet"
+      num_classes: 16
+    }
     highway_dnn {
       input: "img"
       emb_size: 16
@@ -168,6 +172,8 @@ export_config {
 - configure feature_groups named img and sample_num respectively
 - e2e_mm_dbmtl:
   - highway_dnn/img_dnn: processes the output vector of the image network
+  - img_model:
+    - model_name: supports "ResNet" / "MobileNet"
 - the remaining options are the same as in [dbmtl](dbmtl.md)
 
 ### Sample Config
@@ -182,6 +188,10 @@ export_config {
 - rec model: deployed to the recommendation service; takes the features and the image embedding (a comma-separated string) as input.
 - total model: contains both the image network and the recommendation network; it is not used online.
 
+### pre_train
+
+When ResNet is used as the img_model, it can be initialized from a [pre-trained model](https://easyrec.oss-cn-beijing.aliyuncs.com/pre_train/resnet_v1a_18.zip); see [incremental training](../incremental_train.md) for details.
+
 ```
 pai -name tensorflow1120_ext
 -project=algo_public
diff --git a/easy_rec/python/model/e2e_mm_dbmtl.py b/easy_rec/python/model/e2e_mm_dbmtl.py
index f810dd8fd..54fb62309 100644
--- a/easy_rec/python/model/e2e_mm_dbmtl.py
+++ b/easy_rec/python/model/e2e_mm_dbmtl.py
@@ -97,7 +96,6 @@ def img_net(self, img_feature):
       img_logit = img_model(img_feature)
 
       img_emb = self.tune_img_emb(img_logit)
-      self._prediction_dict['img_logits_test'] = img_emb
       if 'sample_num' in self._input_layer._feature_groups:
         # expand img_emb and img_logits from item level to sample level
         sample_idx_fea = tf.cast(self._sample_idx_fea, tf.int32)
@@ -235,5 +234,4 @@ def get_outputs(self):
           suffix='_%s' % tower_name))
     if 'img' in self._input_layer._feature_groups:
       outputs.append('img_emb')
-      outputs.append('img_logits_test')
     return outputs
diff --git a/samples/model_config/taobao_fg_e2e_mm_dbmtl.config b/samples/model_config/taobao_fg_e2e_mm_dbmtl.config
index 11902eb21..39f29a064 100644
--- a/samples/model_config/taobao_fg_e2e_mm_dbmtl.config
+++ b/samples/model_config/taobao_fg_e2e_mm_dbmtl.config
@@ -239,7 +239,7 @@ feature_config: {
   features {
     input_names: "group_size"
     feature_type: SampleNumFeature
-  }t
+  }
 }
 model_config {
   model_class: "E2E_MM_DBMTL"
@@ -275,6 +275,10 @@ model_config {
   }
   embedding_regularization: 1e-05
   e2e_mm_dbmtl {
+    img_model {
+      model_name: "ResNet"
+      num_classes: 16
+    }
     highway_dnn {
       input: "img"
       emb_size: 16
@@ -312,4 +316,14 @@ model_config {
       weight: 1.0
     }
   }
-}
\ No newline at end of file
+}
+
+export_config {
+  multi_placeholder: true
+  img_input_name: "img_path"
+  img_shape {
+    width: 224
+    height: 224
+    channel: 3
+  }
+}

From 79582c5e8c41e8065752db179aceb56ccf74d56a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=99=93=E6=82=A6?=
Date: Mon, 7 Mar 2022 16:00:04 +0800
Subject: [PATCH 7/9] add vision backbones

---
 easy_rec/python/model/e2e_mm_dbmtl.py         |   36 +-
 easy_rec/python/vision_backbones/__init__.py  |    0
 easy_rec/python/vision_backbones/net_utils.py |   44 +
 .../python/vision_backbones/nets/__init__.py  |    1 +
 .../nets/mobilenet/__init__.py                |    0
 .../nets/mobilenet/conv_blocks.py             |  474 +++++++
 .../nets/mobilenet/mobilenet.py               |  490 +++++++
 .../nets/mobilenet/mobilenet_v2.py            |  249 ++++
 .../nets/mobilenet/mobilenet_v3.py            |  405 ++++++
 .../vision_backbones/nets/mobilenet_v1.py     |  500 +++++++
 .../vision_backbones/nets/resnet_utils.py     |  330 +++++
 .../python/vision_backbones/nets/resnet_v1.py | 1207 +++++++++++++++++
 .../python/vision_backbones/nets/resnet_v2.py |  374 +++++
 13 files changed, 4091 insertions(+), 19 deletions(-)
 create mode 100644 easy_rec/python/vision_backbones/__init__.py
 create mode 100644 easy_rec/python/vision_backbones/net_utils.py
 create mode 100644 easy_rec/python/vision_backbones/nets/__init__.py
 create mode 100644 easy_rec/python/vision_backbones/nets/mobilenet/__init__.py
 create mode 100644 easy_rec/python/vision_backbones/nets/mobilenet/conv_blocks.py
 create mode 100644 easy_rec/python/vision_backbones/nets/mobilenet/mobilenet.py
 create mode 100644 easy_rec/python/vision_backbones/nets/mobilenet/mobilenet_v2.py
 create mode 100644 easy_rec/python/vision_backbones/nets/mobilenet/mobilenet_v3.py
 create mode 100644 easy_rec/python/vision_backbones/nets/mobilenet_v1.py
 create mode 100644 easy_rec/python/vision_backbones/nets/resnet_utils.py
 create mode 100644 easy_rec/python/vision_backbones/nets/resnet_v1.py
 create mode 100644 easy_rec/python/vision_backbones/nets/resnet_v2.py

diff --git a/easy_rec/python/model/e2e_mm_dbmtl.py b/easy_rec/python/model/e2e_mm_dbmtl.py
index 54fb62309..21ca3c236 100644
--- a/easy_rec/python/model/e2e_mm_dbmtl.py
+++ b/easy_rec/python/model/e2e_mm_dbmtl.py
@@ -1,9 +1,7 @@
 # -*- encoding:utf-8 -*-
 # Copyright (c) Alibaba, Inc. and its affiliates.
 import logging
-
 import tensorflow as tf
-from tensorflow.keras.applications.resnet50 import ResNet50
 
 from easy_rec.python.layers import dnn
 from easy_rec.python.layers import mmoe
@@ -78,23 +76,23 @@ def img_net(self, img_feature):
     with tf.device('/CPU:0'):
       img_feature = tf.reshape(
           img_feature, (-1, self.img_width, self.img_height, self.img_channel))
-      # if self._model_config.img_model.model_name == 'ResNet':
-      #   from easy_vision.python.core.backbones.nets.resnet_v1 import resnet_v1a_18
-      #   img_logit = resnet_v1a_18(
-      #     img_feature, num_classes=self._model_config.img_model.num_classes, is_training=self._is_training)[0]
-      # elif self._model_config.img_model.model_name == 'MobileNet':
-      #   from easy_vision.python.core.backbones.nets.mobilenet.mobilenet_v3 import mobilenet
-      #   img_logit = mobilenet(
-      #     img_feature, num_classes=self._model_config.img_model.num_classes, is_training=self._is_training)[0]
-      # else:
-      #   assert False, "img_model must in [ResNet, MobileNet]"
-
-      img_model = ResNet50(
-          include_top=True,
-          pooling='max',
-          classes=self._model_config.img_model.num_classes,
-          weights=None)
-      img_logit = img_model(img_feature)
+      if self._model_config.img_model.model_name == 'ResNet':
+        from easy_rec.python.vision_backbones.nets.resnet_v1 import resnet_v1a_18
+        img_logit = resnet_v1a_18(
+            img_feature, num_classes=self._model_config.img_model.num_classes, is_training=self._is_training)[0]
+      elif self._model_config.img_model.model_name == 'MobileNet':
+        from easy_rec.python.vision_backbones.nets.mobilenet.mobilenet_v3 import mobilenet
+        img_logit = mobilenet(
+            img_feature, num_classes=self._model_config.img_model.num_classes, is_training=self._is_training)[0]
+      else:
+        assert False, "img_model must be in [ResNet, MobileNet]"
+      # from tensorflow.keras.applications.resnet50 import ResNet50
+      # img_model = ResNet50(
+      #     include_top=True,
+      #     pooling='max',
+      #     classes=self._model_config.img_model.num_classes,
+      #     weights=None)
+      # img_logit = img_model(img_feature)
 
       img_emb = self.tune_img_emb(img_logit)
       if 'sample_num' in self._input_layer._feature_groups:
diff --git a/easy_rec/python/vision_backbones/__init__.py b/easy_rec/python/vision_backbones/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git
a/easy_rec/python/vision_backbones/net_utils.py b/easy_rec/python/vision_backbones/net_utils.py
new file mode 100644
index 000000000..cc137110e
--- /dev/null
+++ b/easy_rec/python/vision_backbones/net_utils.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# -*- encoding:utf-8 -*-
+# Filename:net_utils.py
+# Author:wenmeng.zwm@alibaba-inc.com
+# Date:2018-12-24
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import tensorflow as tf
+
+slim = tf.contrib.slim
+
+
+def reduced_kernel_size_for_small_input(input_tensor, kernel_size):
+  """Define kernel size which is automatically reduced for small input.
+
+  If the shape of the input images is unknown at graph construction time this
+  function assumes that the input images are large enough.
+
+  Args:
+    input_tensor: input tensor of size [batch_size, height, width, channels].
+    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
+
+  Returns:
+    a tensor with the kernel size.
+
+  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
+  can be done with the code below. Problems are two-fold: (1) If the shape was
+  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
+  handle tensors that define the kernel size.
+      shape = tf.shape(input_tensor)
+      return = tf.stack([tf.minimum(shape[1], kernel_size[0]),
+                         tf.minimum(shape[2], kernel_size[1])])
+
+  """
+  shape = input_tensor.get_shape().as_list()
+  if shape[1] is None or shape[2] is None:
+    kernel_size_out = kernel_size
+  else:
+    kernel_size_out = [min(shape[1], kernel_size[0]),
+                       min(shape[2], kernel_size[1])]
+  return kernel_size_out
diff --git a/easy_rec/python/vision_backbones/nets/__init__.py b/easy_rec/python/vision_backbones/nets/__init__.py
new file mode 100644
index 000000000..8b1378917
--- /dev/null
+++ b/easy_rec/python/vision_backbones/nets/__init__.py
@@ -0,0 +1 @@
+
diff --git a/easy_rec/python/vision_backbones/nets/mobilenet/__init__.py b/easy_rec/python/vision_backbones/nets/mobilenet/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/easy_rec/python/vision_backbones/nets/mobilenet/conv_blocks.py b/easy_rec/python/vision_backbones/nets/mobilenet/conv_blocks.py
new file mode 100644
index 000000000..a19c68e36
--- /dev/null
+++ b/easy_rec/python/vision_backbones/nets/mobilenet/conv_blocks.py
@@ -0,0 +1,474 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Convolution blocks for mobilenet."""
+import contextlib
+import functools
+
+import tensorflow as tf
+from tensorflow.contrib import slim as contrib_slim
+
+slim = contrib_slim
+
+
+def _fixed_padding(inputs, kernel_size, rate=1):
+  """Pads the input along the spatial dimensions independently of input size.
+ + Pads the input such that if it was used in a convolution with 'VALID' padding, + the output would have the same dimensions as if the unpadded input was used + in a convolution with 'SAME' padding. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + kernel_size: The kernel to be used in the conv2d or max_pool2d operation. + rate: An integer, rate for atrous convolution. + + Returns: + output: A tensor of size [batch, height_out, width_out, channels] with the + input, either intact (if kernel_size == 1) or padded (if kernel_size > 1). + """ + kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1), + kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)] + pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1] + pad_beg = [pad_total[0] // 2, pad_total[1] // 2] + pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]] + padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]], + [pad_beg[1], pad_end[1]], [0, 0]]) + return padded_inputs + + +def _make_divisible(v, divisor, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < 0.9 * v: + new_v += divisor + return new_v + + +def _split_divisible(num, num_ways, divisible_by=8): + """Evenly splits num, num_ways so each piece is a multiple of divisible_by.""" + assert num % divisible_by == 0 + assert num / num_ways >= divisible_by + # Note: want to round down, we adjust each split to match the total. + base = num // num_ways // divisible_by * divisible_by + result = [] + accumulated = 0 + for i in range(num_ways): + r = base + while accumulated + r < num * (i + 1) / num_ways: + r += divisible_by + result.append(r) + accumulated += r + assert accumulated == num + return result + + +@contextlib.contextmanager +def _v1_compatible_scope_naming(scope): + if scope is None: # Create uniqified separable blocks. + with tf.variable_scope(None, default_name='separable') as s, \ + tf.name_scope(s.original_name_scope): + yield '' + else: + # We use scope_depthwise, scope_pointwise for compatibility with V1 ckpts. + # which provide numbered scopes. + scope += '_' + yield scope + + +@slim.add_arg_scope +def split_separable_conv2d(input_tensor, + num_outputs, + scope=None, + normalizer_fn=None, + stride=1, + rate=1, + endpoints=None, + use_explicit_padding=False): + """Separable mobilenet V1 style convolution. + + Depthwise convolution, with default non-linearity, + followed by 1x1 depthwise convolution. This is similar to + slim.separable_conv2d, but differs in tha it applies batch + normalization and non-linearity to depthwise. This matches + the basic building of Mobilenet Paper + (https://arxiv.org/abs/1704.04861) + + Args: + input_tensor: input + num_outputs: number of outputs + scope: optional name of the scope. Note if provided it will use + scope_depthwise for deptwhise, and scope_pointwise for pointwise. + normalizer_fn: which normalizer function to use for depthwise/pointwise + stride: stride + rate: output rate (also known as dilation rate) + endpoints: optional, if provided, will export additional tensors to it. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. 
+ + Returns: + output tesnor + """ + + with _v1_compatible_scope_naming(scope) as scope: + dw_scope = scope + 'depthwise' + endpoints = endpoints if endpoints is not None else {} + kernel_size = [3, 3] + padding = 'SAME' + if use_explicit_padding: + padding = 'VALID' + input_tensor = _fixed_padding(input_tensor, kernel_size, rate) + net = slim.separable_conv2d( + input_tensor, + None, + kernel_size, + depth_multiplier=1, + stride=stride, + rate=rate, + normalizer_fn=normalizer_fn, + padding=padding, + scope=dw_scope) + + endpoints[dw_scope] = net + + pw_scope = scope + 'pointwise' + net = slim.conv2d( + net, + num_outputs, [1, 1], + stride=1, + normalizer_fn=normalizer_fn, + scope=pw_scope) + endpoints[pw_scope] = net + return net + + +def expand_input_by_factor(n, divisible_by=8): + return lambda num_inputs, **_: _make_divisible(num_inputs * n, divisible_by) + + +def split_conv(input_tensor, + num_outputs, + num_ways, + scope, + divisible_by=8, + **kwargs): + """Creates a split convolution. + + Split convolution splits the input and output into + 'num_blocks' blocks of approximately the same size each, + and only connects $i$-th input to $i$ output. + + Args: + input_tensor: input tensor + num_outputs: number of output filters + num_ways: num blocks to split by. + scope: scope for all the operators. + divisible_by: make sure that every part is divisiable by this. + **kwargs: will be passed directly into conv2d operator + Returns: + tensor + """ + b = input_tensor.get_shape().as_list()[3] + + if num_ways == 1 or min(b // num_ways, + num_outputs // num_ways) < divisible_by: + # Don't do any splitting if we end up with less than 8 filters + # on either side. + return slim.conv2d(input_tensor, num_outputs, [1, 1], scope=scope, **kwargs) + + outs = [] + input_splits = _split_divisible(b, num_ways, divisible_by=divisible_by) + output_splits = _split_divisible( + num_outputs, num_ways, divisible_by=divisible_by) + inputs = tf.split(input_tensor, input_splits, axis=3, name='split_' + scope) + base = scope + for i, (input_tensor, out_size) in enumerate(zip(inputs, output_splits)): + scope = base + '_part_%d' % (i,) + n = slim.conv2d(input_tensor, out_size, [1, 1], scope=scope, **kwargs) + n = tf.identity(n, scope + '_output') + outs.append(n) + return tf.concat(outs, 3, name=scope + '_concat') + + +@slim.add_arg_scope +def expanded_conv(input_tensor, + num_outputs, + expansion_size=expand_input_by_factor(6), + stride=1, + rate=1, + kernel_size=(3, 3), + residual=True, + normalizer_fn=None, + split_projection=1, + split_expansion=1, + split_divisible_by=8, + expansion_transform=None, + depthwise_location='expansion', + depthwise_channel_multiplier=1, + endpoints=None, + use_explicit_padding=False, + padding='SAME', + inner_activation_fn=None, + depthwise_activation_fn=None, + project_activation_fn=tf.identity, + depthwise_fn=slim.separable_conv2d, + expansion_fn=split_conv, + projection_fn=split_conv, + scope=None): + """Depthwise Convolution Block with expansion. + + Builds a composite convolution that has the following structure + expansion (1x1) -> depthwise (kernel_size) -> projection (1x1) + + Args: + input_tensor: input + num_outputs: number of outputs in the final layer. + expansion_size: the size of expansion, could be a constant or a callable. + If latter it will be provided 'num_inputs' as an input. For forward + compatibility it should accept arbitrary keyword arguments. + Default will expand the input by factor of 6. 
+ stride: depthwise stride + rate: depthwise rate + kernel_size: depthwise kernel + residual: whether to include residual connection between input + and output. + normalizer_fn: batchnorm or otherwise + split_projection: how many ways to split projection operator + (that is conv expansion->bottleneck) + split_expansion: how many ways to split expansion op + (that is conv bottleneck->expansion) ops will keep depth divisible + by this value. + split_divisible_by: make sure every split group is divisible by this number. + expansion_transform: Optional function that takes expansion + as a single input and returns output. + depthwise_location: where to put depthwise covnvolutions supported + values None, 'input', 'output', 'expansion' + depthwise_channel_multiplier: depthwise channel multiplier: + each input will replicated (with different filters) + that many times. So if input had c channels, + output will have c x depthwise_channel_multpilier. + endpoints: An optional dictionary into which intermediate endpoints are + placed. The keys "expansion_output", "depthwise_output", + "projection_output" and "expansion_transform" are always populated, even + if the corresponding functions are not invoked. + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + padding: Padding type to use if `use_explicit_padding` is not set. + inner_activation_fn: activation function to use in all inner convolutions. + If none, will rely on slim default scopes. + depthwise_activation_fn: activation function to use for deptwhise only. + If not provided will rely on slim default scopes. If both + inner_activation_fn and depthwise_activation_fn are provided, + depthwise_activation_fn takes precedence over inner_activation_fn. + project_activation_fn: activation function for the project layer. + (note this layer is not affected by inner_activation_fn) + depthwise_fn: Depthwise convolution function. + expansion_fn: Expansion convolution function. If use custom function then + "split_expansion" and "split_divisible_by" will be ignored. + projection_fn: Projection convolution function. If use custom function then + "split_projection" and "split_divisible_by" will be ignored. + + scope: optional scope. 
+ + Returns: + Tensor of depth num_outputs + + Raises: + TypeError: on inval + """ + conv_defaults = {} + dw_defaults = {} + if inner_activation_fn is not None: + conv_defaults['activation_fn'] = inner_activation_fn + dw_defaults['activation_fn'] = inner_activation_fn + if depthwise_activation_fn is not None: + dw_defaults['activation_fn'] = depthwise_activation_fn + # pylint: disable=g-backslash-continuation + with tf.variable_scope(scope, default_name='expanded_conv') as s, \ + tf.name_scope(s.original_name_scope), \ + slim.arg_scope((slim.conv2d,), **conv_defaults), \ + slim.arg_scope((slim.separable_conv2d,), **dw_defaults): + prev_depth = input_tensor.get_shape().as_list()[3] + if depthwise_location not in [None, 'input', 'output', 'expansion']: + raise TypeError('%r is unknown value for depthwise_location' % + depthwise_location) + if use_explicit_padding: + if padding != 'SAME': + raise TypeError('`use_explicit_padding` should only be used with ' + '"SAME" padding.') + padding = 'VALID' + depthwise_func = functools.partial( + depthwise_fn, + num_outputs=None, + kernel_size=kernel_size, + depth_multiplier=depthwise_channel_multiplier, + stride=stride, + rate=rate, + normalizer_fn=normalizer_fn, + padding=padding, + scope='depthwise') + # b1 -> b2 * r -> b2 + # i -> (o * r) (bottleneck) -> o + input_tensor = tf.identity(input_tensor, 'input') + net = input_tensor + + if depthwise_location == 'input': + if use_explicit_padding: + net = _fixed_padding(net, kernel_size, rate) + net = depthwise_func(net, activation_fn=None) + net = tf.identity(net, name='depthwise_output') + if endpoints is not None: + endpoints['depthwise_output'] = net + + if callable(expansion_size): + inner_size = expansion_size(num_inputs=prev_depth) + else: + inner_size = expansion_size + + if inner_size > net.shape[3]: + if expansion_fn == split_conv: + expansion_fn = functools.partial( + expansion_fn, + num_ways=split_expansion, + divisible_by=split_divisible_by, + stride=1) + net = expansion_fn( + net, + inner_size, + scope='expand', + normalizer_fn=normalizer_fn) + net = tf.identity(net, 'expansion_output') + if endpoints is not None: + endpoints['expansion_output'] = net + + if depthwise_location == 'expansion': + if use_explicit_padding: + net = _fixed_padding(net, kernel_size, rate) + net = depthwise_func(net) + net = tf.identity(net, name='depthwise_output') + if endpoints is not None: + endpoints['depthwise_output'] = net + + if expansion_transform: + net = expansion_transform(expansion_tensor=net, input_tensor=input_tensor) + # Note in contrast with expansion, we always have + # projection to produce the desired output size. 
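+    # The projection below is a linear 1x1 conv: project_activation_fn
+    # defaults to tf.identity, so the expanded/depthwise features are mapped
+    # down to num_outputs channels without a non-linearity, and the residual
+    # shortcut further down is only added when stride == 1 and the input
+    # depth equals num_outputs.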
+ if projection_fn == split_conv: + projection_fn = functools.partial( + projection_fn, + num_ways=split_projection, + divisible_by=split_divisible_by, + stride=1) + net = projection_fn( + net, + num_outputs, + scope='project', + normalizer_fn=normalizer_fn, + activation_fn=project_activation_fn) + if endpoints is not None: + endpoints['projection_output'] = net + if depthwise_location == 'output': + if use_explicit_padding: + net = _fixed_padding(net, kernel_size, rate) + net = depthwise_func(net, activation_fn=None) + net = tf.identity(net, name='depthwise_output') + if endpoints is not None: + endpoints['depthwise_output'] = net + + if callable(residual): # custom residual + net = residual(input_tensor=input_tensor, output_tensor=net) + elif (residual and + # stride check enforces that we don't add residuals when spatial + # dimensions are None + stride == 1 and + # Depth matches + net.get_shape().as_list()[3] == + input_tensor.get_shape().as_list()[3]): + net += input_tensor + return tf.identity(net, name='output') + + +@slim.add_arg_scope +def squeeze_excite(input_tensor, + divisible_by=8, + squeeze_factor=3, + inner_activation_fn=tf.nn.relu, + gating_fn=tf.sigmoid, + squeeze_input_tensor=None, + pool=None): + """Squeeze excite block for Mobilenet V3. + + If the squeeze_input_tensor - or the input_tensor if squeeze_input_tensor is + None - contains variable dimensions (Nonetype in tensor shape), perform + average pooling (as the first step in the squeeze operation) by calling + reduce_mean across the H/W of the input tensor. + + Args: + input_tensor: input tensor to apply SE block to. + divisible_by: ensures all inner dimensions are divisible by this number. + squeeze_factor: the factor of squeezing in the inner fully connected layer + inner_activation_fn: non-linearity to be used in inner layer. + gating_fn: non-linearity to be used for final gating function + squeeze_input_tensor: custom tensor to use for computing gating activation. + If provided the result will be input_tensor * SE(squeeze_input_tensor) + instead of input_tensor * SE(input_tensor). + pool: if number is provided will average pool with that kernel size + to compute inner tensor, followed by bilinear upsampling. + + Returns: + Gated input_tensor. (e.g. X * SE(X)) + """ + with tf.variable_scope('squeeze_excite'): + if squeeze_input_tensor is None: + squeeze_input_tensor = input_tensor + input_size = input_tensor.shape.as_list()[1:3] + pool_height, pool_width = squeeze_input_tensor.shape.as_list()[1:3] + stride = 1 + if pool is not None and pool_height >= pool: + pool_height, pool_width, stride = pool, pool, pool + input_channels = squeeze_input_tensor.shape.as_list()[3] + output_channels = input_tensor.shape.as_list()[3] + squeeze_channels = _make_divisible( + input_channels / squeeze_factor, divisor=divisible_by) + + if pool is None: + pooled = tf.reduce_mean(squeeze_input_tensor, axis=[1, 2], keepdims=True) + else: + pooled = tf.nn.avg_pool( + squeeze_input_tensor, (1, pool_height, pool_width, 1), + strides=(1, stride, stride, 1), + padding='VALID') + squeeze = slim.conv2d( + pooled, + kernel_size=(1, 1), + num_outputs=squeeze_channels, + normalizer_fn=None, + activation_fn=inner_activation_fn) + excite_outputs = output_channels + excite = slim.conv2d(squeeze, num_outputs=excite_outputs, + kernel_size=[1, 1], + normalizer_fn=None, + activation_fn=gating_fn) + if pool is not None: + # Note: As of 03/20/2019 only BILINEAR (the default) with + # align_corners=True has gradients implemented in TPU. 
+ excite = tf.image.resize_images( + excite, input_size, + align_corners=True) + result = input_tensor * excite + return result diff --git a/easy_rec/python/vision_backbones/nets/mobilenet/mobilenet.py b/easy_rec/python/vision_backbones/nets/mobilenet/mobilenet.py new file mode 100644 index 000000000..62e139e89 --- /dev/null +++ b/easy_rec/python/vision_backbones/nets/mobilenet/mobilenet.py @@ -0,0 +1,490 @@ +# Copyright 2018 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Mobilenet Base Class.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import collections +import contextlib +import copy +import os + +from easy_rec.python.vision_backbones import net_utils +import tensorflow as tf +from tensorflow.contrib import slim as contrib_slim +from collections import OrderedDict + +slim = contrib_slim + + +@slim.add_arg_scope +def apply_activation(x, name=None, activation_fn=None): + return activation_fn(x, name=name) if activation_fn else x + + +def _fixed_padding(inputs, kernel_size, rate=1): + """Pads the input along the spatial dimensions independently of input size. + + Pads the input such that if it was used in a convolution with 'VALID' padding, + the output would have the same dimensions as if the unpadded input was used + in a convolution with 'SAME' padding. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + kernel_size: The kernel to be used in the conv2d or max_pool2d operation. + rate: An integer, rate for atrous convolution. + + Returns: + output: A tensor of size [batch, height_out, width_out, channels] with the + input, either intact (if kernel_size == 1) or padded (if kernel_size > 1). + """ + kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1), + kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)] + pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1] + pad_beg = [pad_total[0] // 2, pad_total[1] // 2] + pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]] + padded_inputs = tf.pad( + tensor=inputs, + paddings=[[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]], + [0, 0]]) + return padded_inputs + + +def _make_divisible(v, divisor, min_value=None): + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. + if new_v < 0.9 * v: + new_v += divisor + return int(new_v) + + +@contextlib.contextmanager +def _set_arg_scope_defaults(defaults): + """Sets arg scope defaults for all items present in defaults. + + Args: + defaults: dictionary/list of pairs, containing a mapping from + function to a dictionary of default args. + + Yields: + context manager where all defaults are set. 
+ """ + if hasattr(defaults, 'items'): + items = list(defaults.items()) + else: + items = defaults + if not items: + yield + else: + func, default_arg = items[0] + with slim.arg_scope(func, **default_arg): + with _set_arg_scope_defaults(items[1:]): + yield + + +@slim.add_arg_scope +def depth_multiplier(output_params, + multiplier, + divisible_by=8, + min_depth=8, + **unused_kwargs): + if 'num_outputs' not in output_params: + return + d = output_params['num_outputs'] + output_params['num_outputs'] = _make_divisible(d * multiplier, divisible_by, + min_depth) + + +_Op = collections.namedtuple('Op', ['op', 'params', 'multiplier_func']) + + +def op(opfunc, multiplier_func=depth_multiplier, **params): + multiplier = params.pop('multiplier_transform', multiplier_func) + return _Op(opfunc, params=params, multiplier_func=multiplier) + + +class NoOpScope(object): + """No-op context manager.""" + + def __enter__(self): + return None + + def __exit__(self, exc_type, exc_value, traceback): + return False + + +def safe_arg_scope(funcs, **kwargs): + """Returns `slim.arg_scope` with all None arguments removed. + + Arguments: + funcs: Functions to pass to `arg_scope`. + **kwargs: Arguments to pass to `arg_scope`. + + Returns: + arg_scope or No-op context manager. + + Note: can be useful if None value should be interpreted as "do not overwrite + this parameter value". + """ + filtered_args = {name: value for name, value in kwargs.items() + if value is not None} + if filtered_args: + return slim.arg_scope(funcs, **filtered_args) + else: + return NoOpScope() + + +@slim.add_arg_scope +def mobilenet_base( # pylint: disable=invalid-name + inputs, + conv_defs, + multiplier=1.0, + final_endpoint=None, + output_stride=None, + use_explicit_padding=False, + scope=None, + is_training=False): + """Mobilenet base network. + + Constructs a network from inputs to the given final endpoint. By default + the network is constructed in inference mode. To create network + in training mode use: + + with slim.arg_scope(mobilenet.training_scope()): + logits, endpoints = mobilenet_base(...) + + Args: + inputs: a tensor of shape [batch_size, height, width, channels]. + conv_defs: A list of op(...) layers specifying the net architecture. + multiplier: Float multiplier for the depth (number of channels) + for all convolution ops. The value must be greater than zero. Typical + usage will be to set this value in (0, 1) to reduce the number of + parameters or computation cost of the model. + final_endpoint: The name of last layer, for early termination for + for V1-based networks: last layer is "layer_14", for V2: "layer_20" + output_stride: An integer that specifies the requested ratio of input to + output spatial resolution. If not None, then we invoke atrous convolution + if necessary to prevent the network from reducing the spatial resolution + of the activation maps. Allowed values are 1 or any even number, excluding + zero. Typical values are 8 (accurate fully convolutional mode), 16 + (fast fully convolutional mode), and 32 (classification mode). + + NOTE- output_stride relies on all consequent operators to support dilated + operators via "rate" parameter. This might require wrapping non-conv + operators to operate properly. + + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + scope: optional variable scope. + is_training: How to setup batch_norm and other ops. Note: most of the time + this does not need be set directly. 
Use mobilenet.training_scope() to set
+      up training instead. This parameter is here for backward compatibility
+      only. It is safe to set it to the value matching
+      training_scope(is_training=...). It is also safe to explicitly set
+      it to False, even if there is an outer training_scope set to training.
+      (The network will be built in inference mode). If this is set to None,
+      no arg_scope is added for slim.batch_norm's is_training parameter.
+
+  Returns:
+    tensor_out: output tensor.
+    end_points: a set of activations for external use, for example summaries or
+                losses.
+
+  Raises:
+    ValueError: depth_multiplier <= 0, or the target output_stride is not
+                allowed.
+  """
+  if multiplier <= 0:
+    raise ValueError('multiplier is not greater than zero.')
+
+  # Set conv defs defaults and overrides.
+  conv_defs_defaults = conv_defs.get('defaults', {})
+  conv_defs_overrides = conv_defs.get('overrides', {})
+  if use_explicit_padding:
+    conv_defs_overrides = copy.deepcopy(conv_defs_overrides)
+    conv_defs_overrides[
+        (slim.conv2d, slim.separable_conv2d)] = {'padding': 'VALID'}
+
+  if output_stride is not None:
+    if output_stride == 0 or (output_stride > 1 and output_stride % 2):
+      raise ValueError('Output stride must be None, 1 or a multiple of 2.')
+
+  # a) Set the tensorflow scope
+  # b) set padding to default: note we might consider removing this
+  #    since it is also set by mobilenet_scope
+  # c) set all defaults
+  # d) set all extra overrides.
+  # pylint: disable=g-backslash-continuation
+  with _scope_all(scope, default_scope='Mobilenet'), \
+      safe_arg_scope([slim.batch_norm], is_training=is_training), \
+      _set_arg_scope_defaults(conv_defs_defaults), \
+      _set_arg_scope_defaults(conv_defs_overrides):
+    # The current_stride variable keeps track of the output stride of the
+    # activations, i.e., the running product of convolution strides up to the
+    # current network layer. This allows us to invoke atrous convolution
+    # whenever applying the next convolution would result in the activations
+    # having output stride larger than the target output_stride.
+    current_stride = 1
+
+    # The atrous convolution rate parameter.
+    rate = 1
+
+    net = inputs
+    # Insert default parameters before the base scope which includes
+    # any custom overrides set in mobilenet.
+    end_points = OrderedDict()
+    scopes = {}
+    for i, opdef in enumerate(conv_defs['spec']):
+      params = dict(opdef.params)
+      opdef.multiplier_func(params, multiplier)
+      stride = params.get('stride', 1)
+      if output_stride is not None and current_stride == output_stride:
+        # If we have reached the target output_stride, then we need to employ
+        # atrous convolution with stride=1 and multiply the atrous rate by the
+        # current unit's stride for use in subsequent layers.
+        layer_stride = 1
+        layer_rate = rate
+        rate *= stride
+      else:
+        layer_stride = stride
+        layer_rate = 1
+        current_stride *= stride
+      # Update params.
+      params['stride'] = layer_stride
+      # Only insert rate to params if rate > 1.
+      if layer_rate > 1:
+        if tuple(params.get('kernel_size', [])) != (1, 1):
+          # We will apply atrous rate in the following cases:
+          # 1) When kernel_size is not in params, the operation then uses
+          #   default kernel size 3x3.
+          # 2) When kernel_size is in params, and if the kernel_size is not
+          #   equal to (1, 1) (there is no need to apply atrous convolution to
+          #   any 1x1 convolution).
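+          # Worked example (illustrative): with op strides [2, 2, 2, 2] and
+          # output_stride=8, current_stride reaches 8 after the third stride-2
+          # op; the fourth op is then applied with stride 1, and `rate` becomes
+          # 2 for every op after it, so spatial resolution is preserved while
+          # the receptive field keeps growing:
+          #   current_stride: 1 -> 2 -> 4 -> 8 (capped at output_stride)
+          #   rate:           1    1    1    1 -> 2 (for subsequent ops)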
+          params['rate'] = layer_rate
+      # Set padding
+      if use_explicit_padding:
+        if 'kernel_size' in params:
+          net = _fixed_padding(net, params['kernel_size'], layer_rate)
+        else:
+          params['use_explicit_padding'] = True
+
+      end_point = 'layer_%d' % (i + 1)
+      try:
+        net = opdef.op(net, **params)
+      except Exception:
+        print('Failed to create op %i: %r params: %r' % (i, opdef, params))
+        raise
+      end_points[end_point] = net
+      scope = os.path.dirname(net.name)
+      scopes[scope] = end_point
+      if final_endpoint is not None and end_point == final_endpoint:
+        break
+
+    # Add all tensors that end with 'output' to
+    # endpoints
+    for t in net.graph.get_operations():
+      scope = os.path.dirname(t.name)
+      bn = os.path.basename(t.name)
+      if scope in scopes and t.name.endswith('output'):
+        end_points[scopes[scope] + '/' + bn] = t.outputs[0]
+    return net, end_points
+
+
+@contextlib.contextmanager
+def _scope_all(scope, default_scope=None):
+  with tf.variable_scope(scope, default_name=default_scope) as s,\
+       tf.name_scope(s.original_name_scope):
+    yield s
+
+
+@slim.add_arg_scope
+def mobilenet(inputs,
+              num_classes=1001,
+              prediction_fn=slim.softmax,
+              reuse=None,
+              scope='Mobilenet',
+              base_only=False,
+              global_pool=True,
+              **mobilenet_args):
+  """Mobilenet model for classification; supports both V1 and V2.
+
+  Note: default mode is inference; use mobilenet.training_scope to create a
+  training network.
+
+  Args:
+    inputs: a tensor of shape [batch_size, height, width, channels].
+    num_classes: number of predicted classes. If 0 or None, the logits layer
+      is omitted and the input features to the logits layer (before dropout)
+      are returned instead.
+    prediction_fn: a function to get predictions out of logits
+      (default softmax).
+    reuse: whether or not the network and its variables should be reused. To be
+      able to reuse, 'scope' must be given.
+    scope: Optional variable_scope.
+    base_only: if True, will only create the base of the network (no pooling
+      and no logits).
+    global_pool: if True, applies global average pooling before the logits
+      layer; otherwise pools with a fixed kernel derived from a [7, 7] window.
+    **mobilenet_args: passed to mobilenet_base verbatim.
+      - conv_defs: list of conv defs
+      - multiplier: Float multiplier for the depth (number of channels)
+        for all convolution ops. The value must be greater than zero. Typical
+        usage will be to set this value in (0, 1) to reduce the number of
+        parameters or computation cost of the model.
+      - output_stride: will ensure that the last layer has at most total stride.
+        If the architecture calls for more stride than that provided
+        (e.g. output_stride=16, but the architecture has 5 stride=2 operators),
+        it will replace output_stride with fractional convolutions using Atrous
+        Convolutions.
+
+  Returns:
+    logits: the pre-softmax activations, a tensor of size
+      [batch_size, num_classes]
+    end_points: a dictionary from components of the network to the corresponding
+      activation tensor.
+
+  Raises:
+    ValueError: Input rank is invalid.
+  """
+  is_training = mobilenet_args.get('is_training', False)
+  input_shape = inputs.get_shape().as_list()
+  if len(input_shape) != 4:
+    raise ValueError('Expected rank 4 input, was: %d' % len(input_shape))
+
+  with tf.variable_scope(scope, 'Mobilenet', reuse=reuse) as scope:
+    inputs = tf.identity(inputs, 'input')
+    net, end_points = mobilenet_base(inputs, scope=scope, **mobilenet_args)
+    if base_only:
+      return net, end_points
+
+    net = tf.identity(net, name='embedding')
+
+    with tf.variable_scope('Logits'):
+      if global_pool:
+        net = global_pool_func(net)
+        end_points['global_pool'] = net
+      else:
+        # Pooling with a fixed kernel size.
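+        # (With the default 224x224 input the final feature map is 7x7, so the
+        # [7, 7] kernel pools it to 1x1; for smaller inputs,
+        # reduced_kernel_size_for_small_input shrinks the kernel to the actual
+        # feature-map size, e.g. [5, 5] for a 160x160 input.)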
+        kernel_size = net_utils.reduced_kernel_size_for_small_input(net, [7, 7])
+        net = slim.avg_pool2d(net, kernel_size, padding='VALID',
+                              scope='AvgPool_1a')
+        end_points['AvgPool_1a'] = net
+
+      if not num_classes:
+        return net, end_points
+      net = slim.dropout(net, scope='Dropout', is_training=is_training)
+      # 1 x 1 x num_classes
+      # Note: legacy scope name.
+      logits = slim.conv2d(
+          net,
+          num_classes, [1, 1],
+          activation_fn=None,
+          normalizer_fn=None,
+          biases_initializer=tf.zeros_initializer(),
+          scope='Conv2d_1c_1x1')
+
+      logits = tf.squeeze(logits, [1, 2])
+
+      logits = tf.identity(logits, name='output')
+    end_points['Logits'] = logits
+    if prediction_fn:
+      end_points['Predictions'] = prediction_fn(logits, 'Predictions')
+  return logits, end_points
+
+
+def global_pool_func(input_tensor, pool_op=tf.nn.avg_pool):
+  """Applies avg pool to produce 1x1 output.
+
+  NOTE: This function is functionally equivalent to reduce_mean, but it has
+  a baked-in average pool which has better support across hardware.
+
+  Args:
+    input_tensor: input tensor
+    pool_op: pooling op (avg pool is default)
+  Returns:
+    a tensor batch_size x 1 x 1 x depth.
+  """
+  shape = input_tensor.get_shape().as_list()
+  if shape[1] is None or shape[2] is None:
+    kernel_size = tf.convert_to_tensor(
+        [1, tf.shape(input_tensor)[1],
+         tf.shape(input_tensor)[2], 1])
+  else:
+    kernel_size = [1, shape[1], shape[2], 1]
+  output = pool_op(
+      input_tensor, ksize=kernel_size, strides=[1, 1, 1, 1], padding='VALID')
+  # Recover output shape, for unknown shape.
+  output.set_shape([None, 1, 1, None])
+  return output
+
+
+def training_scope(is_training=True,
+                   weight_decay=0.00004,
+                   stddev=0.09,
+                   dropout_keep_prob=0.8,
+                   bn_decay=0.997):
+  """Defines Mobilenet training scope.
+
+  Usage:
+     with tf.contrib.slim.arg_scope(mobilenet.training_scope()):
+       logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
+
+     # the network created will be trainable with dropout/batch norm
+     # initialized appropriately.
+  Args:
+    is_training: if set to False this will ensure that all customizations are
+      set to non-training mode. This might be helpful for code that is reused
+      across both training/evaluation, but most of the time training_scope with
+      value False is not needed. If this is set to None, the parameter is not
+      added to the batch_norm arg_scope.
+
+    weight_decay: The weight decay to use for regularizing the model.
+    stddev: Standard deviation for initialization, if negative uses xavier.
+    dropout_keep_prob: dropout keep probability (not set if equal to None).
+    bn_decay: decay for the batch norm moving averages (not set if equal to
+      None).
+
+  Returns:
+    An argument scope to use via arg_scope.
+  """
+  # Note: do not introduce parameters that would change the inference
+  # model here (for example whether to use bias), modify conv_def instead.
+  batch_norm_params = {
+      'decay': bn_decay,
+      'is_training': is_training
+  }
+  if stddev < 0:
+    weight_initializer = slim.initializers.xavier_initializer()
+  else:
+    weight_initializer = tf.truncated_normal_initializer(stddev=stddev)
+
+  # Set weight_decay for weights in Conv and FC layers.
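+  # The arg_scopes below nest, so callers can override any individual default
+  # by opening a narrower scope inside this one, e.g. (a sketch; V2_DEF lives
+  # in mobilenet_v2.py):
+  #
+  #   with slim.arg_scope(training_scope(is_training=True)), \
+  #       slim.arg_scope([slim.dropout], keep_prob=0.9):
+  #     logits, _ = mobilenet(images, conv_defs=V2_DEF)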
+  with slim.arg_scope(
+      [slim.conv2d, slim.fully_connected, slim.separable_conv2d],
+      weights_initializer=weight_initializer,
+      normalizer_fn=slim.batch_norm), \
+      slim.arg_scope([mobilenet_base, mobilenet], is_training=is_training),\
+      safe_arg_scope([slim.batch_norm], **batch_norm_params), \
+      safe_arg_scope([slim.dropout], is_training=is_training,
+                     keep_prob=dropout_keep_prob), \
+      slim.arg_scope([slim.conv2d], \
+                     weights_regularizer=slim.l2_regularizer(weight_decay)), \
+      slim.arg_scope([slim.separable_conv2d], weights_regularizer=None) as s:
+    return s
diff --git a/easy_rec/python/vision_backbones/nets/mobilenet/mobilenet_v2.py b/easy_rec/python/vision_backbones/nets/mobilenet/mobilenet_v2.py
new file mode 100644
index 000000000..f1856c584
--- /dev/null
+++ b/easy_rec/python/vision_backbones/nets/mobilenet/mobilenet_v2.py
@@ -0,0 +1,249 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Implementation of Mobilenet V2.
+
+Architecture: https://arxiv.org/abs/1801.04381
+
+The base model gives 72.2% accuracy on ImageNet, with 300MMadds,
+3.4 M parameters.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import copy
+import functools
+
+import tensorflow as tf
+from tensorflow.contrib import layers as contrib_layers
+from tensorflow.contrib import slim as contrib_slim
+
+from easy_rec.python.vision_backbones.nets.mobilenet import conv_blocks as ops
+from easy_rec.python.vision_backbones.nets.mobilenet import mobilenet as lib
+
+slim = contrib_slim
+op = lib.op
+
+expand_input = ops.expand_input_by_factor
+
+# pyformat: disable
+# Architecture: https://arxiv.org/abs/1801.04381
+V2_DEF = dict(
+    defaults={
+        # Note: these parameters of batch norm affect the architecture
+        # that's why they are here and not in training_scope.
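+        # ('center'/'scale' add the beta/gamma variables, so changing them
+        # changes the set of variables, and hence checkpoint compatibility.)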
+ (slim.batch_norm,): {'center': True, 'scale': True}, + (slim.conv2d, slim.fully_connected, slim.separable_conv2d): { + 'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6 + }, + (ops.expanded_conv,): { + 'expansion_size': expand_input(6), + 'split_expansion': 1, + 'normalizer_fn': slim.batch_norm, + 'residual': True + }, + (slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'} + }, + spec=[ + op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]), + op(ops.expanded_conv, + expansion_size=expand_input(1, divisible_by=1), + num_outputs=16), + op(ops.expanded_conv, stride=2, num_outputs=24), + op(ops.expanded_conv, stride=1, num_outputs=24), + op(ops.expanded_conv, stride=2, num_outputs=32), + op(ops.expanded_conv, stride=1, num_outputs=32), + op(ops.expanded_conv, stride=1, num_outputs=32), + op(ops.expanded_conv, stride=2, num_outputs=64), + op(ops.expanded_conv, stride=1, num_outputs=64), + op(ops.expanded_conv, stride=1, num_outputs=64), + op(ops.expanded_conv, stride=1, num_outputs=64), + op(ops.expanded_conv, stride=1, num_outputs=96), + op(ops.expanded_conv, stride=1, num_outputs=96), + op(ops.expanded_conv, stride=1, num_outputs=96), + op(ops.expanded_conv, stride=2, num_outputs=160), + op(ops.expanded_conv, stride=1, num_outputs=160), + op(ops.expanded_conv, stride=1, num_outputs=160), + op(ops.expanded_conv, stride=1, num_outputs=320), + op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280) + ], +) +# pyformat: enable + +# Mobilenet v2 Definition with group normalization. +V2_DEF_GROUP_NORM = copy.deepcopy(V2_DEF) +V2_DEF_GROUP_NORM['defaults'] = { + (contrib_slim.conv2d, contrib_slim.fully_connected, + contrib_slim.separable_conv2d): { + 'normalizer_fn': contrib_layers.group_norm, # pylint: disable=C0330 + 'activation_fn': tf.nn.relu6, # pylint: disable=C0330 + }, # pylint: disable=C0330 + (ops.expanded_conv,): { + 'expansion_size': ops.expand_input_by_factor(6), + 'split_expansion': 1, + 'normalizer_fn': contrib_layers.group_norm, + 'residual': True + }, + (contrib_slim.conv2d, contrib_slim.separable_conv2d): { + 'padding': 'SAME' + } +} + + +@slim.add_arg_scope +def mobilenet(input_tensor, + num_classes=1001, + depth_multiplier=1.0, + scope='MobilenetV2', + conv_defs=None, + finegrain_classification_mode=False, + min_depth=None, + divisible_by=None, + activation_fn=None, + **kwargs): + """Creates mobilenet V2 network. + + Inference mode is created by default. To create training use training_scope + below. + + with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()): + logits, endpoints = mobilenet_v2.mobilenet(input_tensor) + + Args: + input_tensor: The input tensor + num_classes: number of classes + depth_multiplier: The multiplier applied to scale number of + channels in each layer. + scope: Scope of the operator + conv_defs: Allows to override default conv def. + finegrain_classification_mode: When set to True, the model + will keep the last layer large even for small multipliers. Following + https://arxiv.org/abs/1801.04381 + suggests that it improves performance for ImageNet-type of problems. + *Note* ignored if final_endpoint makes the builder exit earlier. + min_depth: If provided, will ensure that all layers will have that + many channels after application of depth multiplier. + divisible_by: If provided will ensure that all layers # channels + will be divisible by this number. + activation_fn: Activation function to use, defaults to tf.nn.relu6 if not + specified. 
+    **kwargs: passed directly to mobilenet.mobilenet:
+      prediction_fn- what prediction function to use.
+      reuse- whether to reuse variables (if reuse is set to True, scope
+      must be given).
+  Returns:
+    logits/endpoints pair
+
+  Raises:
+    ValueError: On invalid arguments
+  """
+  if conv_defs is None:
+    conv_defs = V2_DEF
+  if 'multiplier' in kwargs:
+    raise ValueError('mobilenetv2 doesn\'t support generic '
+                     'multiplier parameter, use "depth_multiplier" instead.')
+  if finegrain_classification_mode:
+    conv_defs = copy.deepcopy(conv_defs)
+    if depth_multiplier < 1:
+      conv_defs['spec'][-1].params['num_outputs'] /= depth_multiplier
+  if activation_fn:
+    conv_defs = copy.deepcopy(conv_defs)
+    defaults = conv_defs['defaults']
+    conv_defaults = (
+        defaults[(slim.conv2d, slim.fully_connected, slim.separable_conv2d)])
+    conv_defaults['activation_fn'] = activation_fn
+
+  depth_args = {}
+  # NB: do not set depth_args unless they are provided to avoid overriding
+  # whatever default depth_multiplier might have thanks to arg_scope.
+  if min_depth is not None:
+    depth_args['min_depth'] = min_depth
+  if divisible_by is not None:
+    depth_args['divisible_by'] = divisible_by
+
+  with slim.arg_scope((lib.depth_multiplier,), **depth_args):
+    return lib.mobilenet(
+        input_tensor,
+        num_classes=num_classes,
+        conv_defs=conv_defs,
+        scope=scope,
+        multiplier=depth_multiplier,
+        **kwargs)
+
+mobilenet.default_image_size = 224
+
+
+def wrapped_partial(func, *args, **kwargs):
+  partial_func = functools.partial(func, *args, **kwargs)
+  functools.update_wrapper(partial_func, func)
+  return partial_func
+
+
+# Wrappers for mobilenet v2 with depth-multipliers. Note that
+# 'finegrain_classification_mode' is set to True for the wrappers with a
+# depth-multiplier < 1.0, which means the embedding layer will not be shrunk.
+mobilenet_v2_140 = wrapped_partial(mobilenet, depth_multiplier=1.4)
+mobilenet_v2_050 = wrapped_partial(mobilenet, depth_multiplier=0.50,
+                                   finegrain_classification_mode=True)
+mobilenet_v2_035 = wrapped_partial(mobilenet, depth_multiplier=0.35,
+                                   finegrain_classification_mode=True)
+
+
+@slim.add_arg_scope
+def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs):
+  """Creates base of the mobilenet (no pooling and no logits)."""
+  return mobilenet(input_tensor,
+                   depth_multiplier=depth_multiplier,
+                   base_only=True, **kwargs)
+
+
+@slim.add_arg_scope
+def mobilenet_base_group_norm(input_tensor, depth_multiplier=1.0, **kwargs):
+  """Creates base of the mobilenet (no pooling and no logits)."""
+  kwargs['conv_defs'] = V2_DEF_GROUP_NORM
+  kwargs['conv_defs']['defaults'].update({
+      (contrib_layers.group_norm,): {
+          'groups': kwargs.pop('groups', 8)
+      }
+  })
+  return mobilenet(
+      input_tensor, depth_multiplier=depth_multiplier, base_only=True, **kwargs)
+
+
+def training_scope(**kwargs):
+  """Defines MobilenetV2 training scope.
+
+  Usage:
+     with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()):
+       logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
+
+  Args:
+    **kwargs: Passed to mobilenet.training_scope. The following parameters
+      are supported:
+      weight_decay- The weight decay to use for regularizing the model.
+      stddev- Standard deviation for initialization, if negative uses xavier.
+      dropout_keep_prob- dropout keep probability
+      bn_decay- decay for the batch norm moving averages.
+
+  Returns:
+    An `arg_scope` to use for the mobilenet v2 model.
+ """ + return lib.training_scope(**kwargs) + + +__all__ = ['training_scope', 'mobilenet_base', 'mobilenet', 'V2_DEF'] diff --git a/easy_rec/python/vision_backbones/nets/mobilenet/mobilenet_v3.py b/easy_rec/python/vision_backbones/nets/mobilenet/mobilenet_v3.py new file mode 100644 index 000000000..02575e2ec --- /dev/null +++ b/easy_rec/python/vision_backbones/nets/mobilenet/mobilenet_v3.py @@ -0,0 +1,405 @@ +# Copyright 2019 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Mobilenet V3 conv defs and helper functions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import functools +import numpy as np + +import tensorflow as tf +from tensorflow.contrib import slim as contrib_slim + +from easy_rec.python.vision_backbones.nets.mobilenet import conv_blocks as ops +from easy_rec.python.vision_backbones.nets.mobilenet import mobilenet as lib + +slim = contrib_slim +op = lib.op +expand_input = ops.expand_input_by_factor + +# Squeeze Excite with all parameters filled-in, we use hard-sigmoid +# for gating function and relu for inner activation function. +squeeze_excite = functools.partial( + ops.squeeze_excite, squeeze_factor=4, + inner_activation_fn=tf.nn.relu, + gating_fn=lambda x: tf.nn.relu6(x+3)*0.16667) + +# Wrap squeeze excite op as expansion_transform that takes +# both expansion and input tensor. +_se4 = lambda expansion_tensor, input_tensor: squeeze_excite(expansion_tensor) + + +def hard_swish(x): + with tf.name_scope('hard_swish'): + return x * tf.nn.relu6(x + np.float32(3)) * np.float32(1. / 6.) + + +def reduce_to_1x1(input_tensor, default_size=7, **kwargs): + h, w = input_tensor.shape.as_list()[1:3] + if h is not None and w == h: + k = [h, h] + else: + k = [default_size, default_size] + return slim.avg_pool2d(input_tensor, kernel_size=k, **kwargs) + + +def mbv3_op(ef, n, k, s=1, act=tf.nn.relu, se=None, **kwargs): + """Defines a single Mobilenet V3 convolution block. + + Args: + ef: expansion factor + n: number of output channels + k: stride of depthwise + s: stride + act: activation function in inner layers + se: squeeze excite function. + **kwargs: passed to expanded_conv + + Returns: + An object (lib._Op) for inserting in conv_def, representing this operation. + """ + return op( + ops.expanded_conv, + expansion_size=expand_input(ef), + kernel_size=(k, k), + stride=s, + num_outputs=n, + inner_activation_fn=act, + expansion_transform=se, + **kwargs) + + +def mbv3_fused(ef, n, k, s=1, **kwargs): + """Defines a single Mobilenet V3 convolution block. + + Args: + ef: expansion factor + n: number of output channels + k: stride of depthwise + s: stride + **kwargs: will be passed to mbv3_op + + Returns: + An object (lib._Op) for inserting in conv_def, representing this operation. 
+ """ + expansion_fn = functools.partial(slim.conv2d, kernel_size=k, stride=s) + return mbv3_op( + ef, + n, + k=1, + s=s, + depthwise_location=None, + expansion_fn=expansion_fn, + **kwargs) + + +mbv3_op_se = functools.partial(mbv3_op, se=_se4) + + +DEFAULTS = { + (ops.expanded_conv,): + dict( + normalizer_fn=slim.batch_norm, + residual=True), + (slim.conv2d, slim.fully_connected, slim.separable_conv2d): { + 'normalizer_fn': slim.batch_norm, + 'activation_fn': tf.nn.relu, + }, + (slim.batch_norm,): { + 'center': True, + 'scale': True + }, +} + +# Compatible checkpoint: http://mldash/5511169891790690458#scalars +V3_LARGE = dict( + defaults=dict(DEFAULTS), + spec=([ + # stage 1 + op(slim.conv2d, stride=2, num_outputs=16, kernel_size=(3, 3), + activation_fn=hard_swish), + mbv3_op(ef=1, n=16, k=3), + mbv3_op(ef=4, n=24, k=3, s=2), + mbv3_op(ef=3, n=24, k=3, s=1), + mbv3_op_se(ef=3, n=40, k=5, s=2), + mbv3_op_se(ef=3, n=40, k=5, s=1), + mbv3_op_se(ef=3, n=40, k=5, s=1), + mbv3_op(ef=6, n=80, k=3, s=2, act=hard_swish), + mbv3_op(ef=2.5, n=80, k=3, s=1, act=hard_swish), + mbv3_op(ef=184/80., n=80, k=3, s=1, act=hard_swish), + mbv3_op(ef=184/80., n=80, k=3, s=1, act=hard_swish), + mbv3_op_se(ef=6, n=112, k=3, s=1, act=hard_swish), + mbv3_op_se(ef=6, n=112, k=3, s=1, act=hard_swish), + mbv3_op_se(ef=6, n=160, k=5, s=2, act=hard_swish), + mbv3_op_se(ef=6, n=160, k=5, s=1, act=hard_swish), + mbv3_op_se(ef=6, n=160, k=5, s=1, act=hard_swish), + op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=960, + activation_fn=hard_swish), + op(reduce_to_1x1, default_size=7, stride=1, padding='VALID'), + op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280, + normalizer_fn=None, activation_fn=hard_swish) + ])) + +# 72.2% accuracy. +V3_LARGE_MINIMALISTIC = dict( + defaults=dict(DEFAULTS), + spec=([ + # stage 1 + op(slim.conv2d, stride=2, num_outputs=16, kernel_size=(3, 3)), + mbv3_op(ef=1, n=16, k=3), + mbv3_op(ef=4, n=24, k=3, s=2), + mbv3_op(ef=3, n=24, k=3, s=1), + mbv3_op(ef=3, n=40, k=3, s=2), + mbv3_op(ef=3, n=40, k=3, s=1), + mbv3_op(ef=3, n=40, k=3, s=1), + mbv3_op(ef=6, n=80, k=3, s=2), + mbv3_op(ef=2.5, n=80, k=3, s=1), + mbv3_op(ef=184 / 80., n=80, k=3, s=1), + mbv3_op(ef=184 / 80., n=80, k=3, s=1), + mbv3_op(ef=6, n=112, k=3, s=1), + mbv3_op(ef=6, n=112, k=3, s=1), + mbv3_op(ef=6, n=160, k=3, s=2), + mbv3_op(ef=6, n=160, k=3, s=1), + mbv3_op(ef=6, n=160, k=3, s=1), + op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=960), + op(reduce_to_1x1, default_size=7, stride=1, padding='VALID'), + op(slim.conv2d, + stride=1, + kernel_size=[1, 1], + num_outputs=1280, + normalizer_fn=None) + ])) + +# Compatible run: http://mldash/2023283040014348118#scalars +V3_SMALL = dict( + defaults=dict(DEFAULTS), + spec=([ + # stage 1 + op(slim.conv2d, stride=2, num_outputs=16, kernel_size=(3, 3), + activation_fn=hard_swish), + mbv3_op_se(ef=1, n=16, k=3, s=2), + mbv3_op(ef=72./16, n=24, k=3, s=2), + mbv3_op(ef=(88./24), n=24, k=3, s=1), + mbv3_op_se(ef=4, n=40, k=5, s=2, act=hard_swish), + mbv3_op_se(ef=6, n=40, k=5, s=1, act=hard_swish), + mbv3_op_se(ef=6, n=40, k=5, s=1, act=hard_swish), + mbv3_op_se(ef=3, n=48, k=5, s=1, act=hard_swish), + mbv3_op_se(ef=3, n=48, k=5, s=1, act=hard_swish), + mbv3_op_se(ef=6, n=96, k=5, s=2, act=hard_swish), + mbv3_op_se(ef=6, n=96, k=5, s=1, act=hard_swish), + mbv3_op_se(ef=6, n=96, k=5, s=1, act=hard_swish), + op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=576, + activation_fn=hard_swish), + op(reduce_to_1x1, default_size=7, stride=1, padding='VALID'), + 
op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1024, + normalizer_fn=None, activation_fn=hard_swish) + ])) + +# 62% accuracy. +V3_SMALL_MINIMALISTIC = dict( + defaults=dict(DEFAULTS), + spec=([ + # stage 1 + op(slim.conv2d, stride=2, num_outputs=16, kernel_size=(3, 3)), + mbv3_op(ef=1, n=16, k=3, s=2), + mbv3_op(ef=72. / 16, n=24, k=3, s=2), + mbv3_op(ef=(88. / 24), n=24, k=3, s=1), + mbv3_op(ef=4, n=40, k=3, s=2), + mbv3_op(ef=6, n=40, k=3, s=1), + mbv3_op(ef=6, n=40, k=3, s=1), + mbv3_op(ef=3, n=48, k=3, s=1), + mbv3_op(ef=3, n=48, k=3, s=1), + mbv3_op(ef=6, n=96, k=3, s=2), + mbv3_op(ef=6, n=96, k=3, s=1), + mbv3_op(ef=6, n=96, k=3, s=1), + op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=576), + op(reduce_to_1x1, default_size=7, stride=1, padding='VALID'), + op(slim.conv2d, + stride=1, + kernel_size=[1, 1], + num_outputs=1024, + normalizer_fn=None) + ])) + + +# EdgeTPU friendly variant of MobilenetV3 that uses fused convolutions +# instead of depthwise in the early layers. +V3_EDGETPU = dict( + defaults=dict(DEFAULTS), + spec=[ + op(slim.conv2d, stride=2, num_outputs=32, kernel_size=(3, 3)), + mbv3_fused(k=3, s=1, ef=1, n=16), + mbv3_fused(k=3, s=2, ef=8, n=32), + mbv3_fused(k=3, s=1, ef=4, n=32), + mbv3_fused(k=3, s=1, ef=4, n=32), + mbv3_fused(k=3, s=1, ef=4, n=32), + mbv3_fused(k=3, s=2, ef=8, n=48), + mbv3_fused(k=3, s=1, ef=4, n=48), + mbv3_fused(k=3, s=1, ef=4, n=48), + mbv3_fused(k=3, s=1, ef=4, n=48), + mbv3_op(k=3, s=2, ef=8, n=96), + mbv3_op(k=3, s=1, ef=4, n=96), + mbv3_op(k=3, s=1, ef=4, n=96), + mbv3_op(k=3, s=1, ef=4, n=96), + mbv3_op(k=3, s=1, ef=8, n=96, residual=False), + mbv3_op(k=3, s=1, ef=4, n=96), + mbv3_op(k=3, s=1, ef=4, n=96), + mbv3_op(k=3, s=1, ef=4, n=96), + mbv3_op(k=5, s=2, ef=8, n=160), + mbv3_op(k=5, s=1, ef=4, n=160), + mbv3_op(k=5, s=1, ef=4, n=160), + mbv3_op(k=5, s=1, ef=4, n=160), + mbv3_op(k=3, s=1, ef=8, n=192), + op(slim.conv2d, stride=1, num_outputs=1280, kernel_size=(1, 1)), + ]) + + +@slim.add_arg_scope +def mobilenet(input_tensor, + num_classes=1001, + depth_multiplier=1.0, + scope='MobilenetV3', + conv_defs=None, + finegrain_classification_mode=False, + **kwargs): + """Creates mobilenet V3 network. + + Inference mode is created by default. To create training use training_scope + below. + + with tf.contrib.slim.arg_scope(mobilenet_v3.training_scope()): + logits, endpoints = mobilenet_v3.mobilenet(input_tensor) + + Args: + input_tensor: The input tensor + num_classes: number of classes + depth_multiplier: The multiplier applied to scale number of + channels in each layer. + scope: Scope of the operator + conv_defs: Which version to create. Could be large/small or + any conv_def (see mobilenet_v3.py for examples). + finegrain_classification_mode: When set to True, the model + will keep the last layer large even for small multipliers. Following + https://arxiv.org/abs/1801.04381 + it improves performance for ImageNet-type of problems. + *Note* ignored if final_endpoint makes the builder exit earlier. + **kwargs: passed directly to mobilenet.mobilenet: + prediction_fn- what prediction function to use. + reuse-: whether to reuse variables (if reuse set to true, scope + must be given). 
+  Returns:
+    logits/endpoints pair
+
+  Raises:
+    ValueError: On invalid arguments
+  """
+  if conv_defs is None:
+    conv_defs = V3_LARGE
+  if 'multiplier' in kwargs:
+    raise ValueError('mobilenetv3 doesn\'t support generic '
+                     'multiplier parameter, use "depth_multiplier" instead.')
+  if finegrain_classification_mode:
+    conv_defs = copy.deepcopy(conv_defs)
+    conv_defs['spec'][-1] = conv_defs['spec'][-1]._replace(
+        multiplier_func=lambda params, multiplier: params)
+  depth_args = {}
+  with slim.arg_scope((lib.depth_multiplier,), **depth_args):
+    return lib.mobilenet(
+        input_tensor,
+        num_classes=num_classes,
+        conv_defs=conv_defs,
+        scope=scope,
+        multiplier=depth_multiplier,
+        **kwargs)
+
+mobilenet.default_image_size = 224
+training_scope = lib.training_scope
+
+
+@slim.add_arg_scope
+def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs):
+  """Creates base of the mobilenet (no pooling and no logits)."""
+  return mobilenet(
+      input_tensor, depth_multiplier=depth_multiplier, base_only=True, **kwargs)
+
+
+def wrapped_partial(func, new_defaults=None,
+                    **kwargs):
+  """Partial function with new default parameters and updated docstring."""
+  if not new_defaults:
+    new_defaults = {}
+  def func_wrapper(*f_args, **f_kwargs):
+    new_kwargs = dict(new_defaults)
+    new_kwargs.update(f_kwargs)
+    return func(*f_args, **new_kwargs)
+  functools.update_wrapper(func_wrapper, func)
+  partial_func = functools.partial(func_wrapper, **kwargs)
+  functools.update_wrapper(partial_func, func)
+  return partial_func
+
+
+large = wrapped_partial(mobilenet, conv_defs=V3_LARGE)
+small = wrapped_partial(mobilenet, conv_defs=V3_SMALL)
+edge_tpu = wrapped_partial(mobilenet,
+                           new_defaults={'scope': 'MobilenetEdgeTPU'},
+                           conv_defs=V3_EDGETPU)
+edge_tpu_075 = wrapped_partial(
+    mobilenet,
+    new_defaults={'scope': 'MobilenetEdgeTPU'},
+    conv_defs=V3_EDGETPU,
+    depth_multiplier=0.75,
+    finegrain_classification_mode=True)
+
+# Minimalistic models that do not have Squeeze Excite blocks,
+# Hardswish, or 5x5 depthwise convolutions.
+# This makes them very friendly for a wide range of hardware.
+large_minimalistic = wrapped_partial(mobilenet, conv_defs=V3_LARGE_MINIMALISTIC)
+small_minimalistic = wrapped_partial(mobilenet, conv_defs=V3_SMALL_MINIMALISTIC)
+
+
+def _reduce_consecutive_layers(conv_defs, start_id, end_id, multiplier=0.5):
+  """Reduce the outputs of consecutive layers with multiplier.
+
+  Args:
+    conv_defs: Mobilenet conv_defs.
+    start_id: 0-based index of the starting conv_def to be reduced.
+    end_id: 0-based index of the last conv_def to be reduced.
+    multiplier: The multiplier by which to reduce the conv_defs.
+
+  Returns:
+    Mobilenet conv_defs where the output sizes from layers [start_id, end_id],
+    inclusive, are reduced by multiplier.
+
+  Raises:
+    ValueError: if any layer to be reduced does not have the 'num_outputs'
+      attribute.
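+
+  For example, V3_LARGE_DETECTION below is V3_LARGE with 'num_outputs' halved
+  for conv defs 13..16.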
+ """ + defs = copy.deepcopy(conv_defs) + for d in defs['spec'][start_id:end_id+1]: + d.params.update({ + 'num_outputs': np.int(np.round(d.params['num_outputs'] * multiplier)) + }) + return defs + + +V3_LARGE_DETECTION = _reduce_consecutive_layers(V3_LARGE, 13, 16) +V3_SMALL_DETECTION = _reduce_consecutive_layers(V3_SMALL, 9, 12) + + +__all__ = ['training_scope', 'mobilenet', 'V3_LARGE', 'V3_SMALL', 'large', + 'small', 'V3_LARGE_DETECTION', 'V3_SMALL_DETECTION'] diff --git a/easy_rec/python/vision_backbones/nets/mobilenet_v1.py b/easy_rec/python/vision_backbones/nets/mobilenet_v1.py new file mode 100644 index 000000000..be6d17357 --- /dev/null +++ b/easy_rec/python/vision_backbones/nets/mobilenet_v1.py @@ -0,0 +1,500 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= +"""MobileNet v1. + +MobileNet is a general architecture and can be used for multiple use cases. +Depending on the use case, it can use different input layer size and different +head (for example: embeddings, localization and classification). + +As described in https://arxiv.org/abs/1704.04861. + + MobileNets: Efficient Convolutional Neural Networks for + Mobile Vision Applications + Andrew G. 
Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang, + Tobias Weyand, Marco Andreetto, Hartwig Adam + +100% Mobilenet V1 (base) with input size 224x224: + +See mobilenet_v1() + +Layer params macs +-------------------------------------------------------------------------------- +MobilenetV1/Conv2d_0/Conv2D: 864 10,838,016 +MobilenetV1/Conv2d_1_depthwise/depthwise: 288 3,612,672 +MobilenetV1/Conv2d_1_pointwise/Conv2D: 2,048 25,690,112 +MobilenetV1/Conv2d_2_depthwise/depthwise: 576 1,806,336 +MobilenetV1/Conv2d_2_pointwise/Conv2D: 8,192 25,690,112 +MobilenetV1/Conv2d_3_depthwise/depthwise: 1,152 3,612,672 +MobilenetV1/Conv2d_3_pointwise/Conv2D: 16,384 51,380,224 +MobilenetV1/Conv2d_4_depthwise/depthwise: 1,152 903,168 +MobilenetV1/Conv2d_4_pointwise/Conv2D: 32,768 25,690,112 +MobilenetV1/Conv2d_5_depthwise/depthwise: 2,304 1,806,336 +MobilenetV1/Conv2d_5_pointwise/Conv2D: 65,536 51,380,224 +MobilenetV1/Conv2d_6_depthwise/depthwise: 2,304 451,584 +MobilenetV1/Conv2d_6_pointwise/Conv2D: 131,072 25,690,112 +MobilenetV1/Conv2d_7_depthwise/depthwise: 4,608 903,168 +MobilenetV1/Conv2d_7_pointwise/Conv2D: 262,144 51,380,224 +MobilenetV1/Conv2d_8_depthwise/depthwise: 4,608 903,168 +MobilenetV1/Conv2d_8_pointwise/Conv2D: 262,144 51,380,224 +MobilenetV1/Conv2d_9_depthwise/depthwise: 4,608 903,168 +MobilenetV1/Conv2d_9_pointwise/Conv2D: 262,144 51,380,224 +MobilenetV1/Conv2d_10_depthwise/depthwise: 4,608 903,168 +MobilenetV1/Conv2d_10_pointwise/Conv2D: 262,144 51,380,224 +MobilenetV1/Conv2d_11_depthwise/depthwise: 4,608 903,168 +MobilenetV1/Conv2d_11_pointwise/Conv2D: 262,144 51,380,224 +MobilenetV1/Conv2d_12_depthwise/depthwise: 4,608 225,792 +MobilenetV1/Conv2d_12_pointwise/Conv2D: 524,288 25,690,112 +MobilenetV1/Conv2d_13_depthwise/depthwise: 9,216 451,584 +MobilenetV1/Conv2d_13_pointwise/Conv2D: 1,048,576 51,380,224 +-------------------------------------------------------------------------------- +Total: 3,185,088 567,716,352 + + +75% Mobilenet V1 (base) with input size 128x128: + +See mobilenet_v1_075() + +Layer params macs +-------------------------------------------------------------------------------- +MobilenetV1/Conv2d_0/Conv2D: 648 2,654,208 +MobilenetV1/Conv2d_1_depthwise/depthwise: 216 884,736 +MobilenetV1/Conv2d_1_pointwise/Conv2D: 1,152 4,718,592 +MobilenetV1/Conv2d_2_depthwise/depthwise: 432 442,368 +MobilenetV1/Conv2d_2_pointwise/Conv2D: 4,608 4,718,592 +MobilenetV1/Conv2d_3_depthwise/depthwise: 864 884,736 +MobilenetV1/Conv2d_3_pointwise/Conv2D: 9,216 9,437,184 +MobilenetV1/Conv2d_4_depthwise/depthwise: 864 221,184 +MobilenetV1/Conv2d_4_pointwise/Conv2D: 18,432 4,718,592 +MobilenetV1/Conv2d_5_depthwise/depthwise: 1,728 442,368 +MobilenetV1/Conv2d_5_pointwise/Conv2D: 36,864 9,437,184 +MobilenetV1/Conv2d_6_depthwise/depthwise: 1,728 110,592 +MobilenetV1/Conv2d_6_pointwise/Conv2D: 73,728 4,718,592 +MobilenetV1/Conv2d_7_depthwise/depthwise: 3,456 221,184 +MobilenetV1/Conv2d_7_pointwise/Conv2D: 147,456 9,437,184 +MobilenetV1/Conv2d_8_depthwise/depthwise: 3,456 221,184 +MobilenetV1/Conv2d_8_pointwise/Conv2D: 147,456 9,437,184 +MobilenetV1/Conv2d_9_depthwise/depthwise: 3,456 221,184 +MobilenetV1/Conv2d_9_pointwise/Conv2D: 147,456 9,437,184 +MobilenetV1/Conv2d_10_depthwise/depthwise: 3,456 221,184 +MobilenetV1/Conv2d_10_pointwise/Conv2D: 147,456 9,437,184 +MobilenetV1/Conv2d_11_depthwise/depthwise: 3,456 221,184 +MobilenetV1/Conv2d_11_pointwise/Conv2D: 147,456 9,437,184 +MobilenetV1/Conv2d_12_depthwise/depthwise: 3,456 55,296 +MobilenetV1/Conv2d_12_pointwise/Conv2D: 294,912 
4,718,592 +MobilenetV1/Conv2d_13_depthwise/depthwise: 6,912 110,592 +MobilenetV1/Conv2d_13_pointwise/Conv2D: 589,824 9,437,184 +-------------------------------------------------------------------------------- +Total: 1,800,144 106,002,432 + +""" + +# Tensorflow mandates these. +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import namedtuple +from collections import OrderedDict +import functools + +from easy_rec.python.vision_backbones import net_utils +import tensorflow as tf + +slim = tf.contrib.slim + +# Conv and DepthSepConv namedtuple define layers of the MobileNet architecture +# Conv defines 3x3 convolution layers +# DepthSepConv defines 3x3 depthwise convolution followed by 1x1 convolution. +# stride is the stride of the convolution +# depth is the number of channels or filters in a layer +Conv = namedtuple('Conv', ['kernel', 'stride', 'depth']) +DepthSepConv = namedtuple('DepthSepConv', ['kernel', 'stride', 'depth']) + +# _CONV_DEFS specifies the MobileNet body +_CONV_DEFS = [ + Conv(kernel=[3, 3], stride=2, depth=32), + DepthSepConv(kernel=[3, 3], stride=1, depth=64), + DepthSepConv(kernel=[3, 3], stride=2, depth=128), + DepthSepConv(kernel=[3, 3], stride=1, depth=128), + DepthSepConv(kernel=[3, 3], stride=2, depth=256), + DepthSepConv(kernel=[3, 3], stride=1, depth=256), + DepthSepConv(kernel=[3, 3], stride=2, depth=512), + DepthSepConv(kernel=[3, 3], stride=1, depth=512), + DepthSepConv(kernel=[3, 3], stride=1, depth=512), + DepthSepConv(kernel=[3, 3], stride=1, depth=512), + DepthSepConv(kernel=[3, 3], stride=1, depth=512), + DepthSepConv(kernel=[3, 3], stride=1, depth=512), + DepthSepConv(kernel=[3, 3], stride=2, depth=1024), + DepthSepConv(kernel=[3, 3], stride=1, depth=1024) +] + + +def _fixed_padding(inputs, kernel_size, rate=1): + """Pads the input along the spatial dimensions independently of input size. + + Pads the input such that if it was used in a convolution with 'VALID' padding, + the output would have the same dimensions as if the unpadded input was used + in a convolution with 'SAME' padding. + + Args: + inputs: A tensor of size [batch, height_in, width_in, channels]. + kernel_size: The kernel to be used in the conv2d or max_pool2d operation. + rate: An integer, rate for atrous convolution. + + Returns: + output: A tensor of size [batch, height_out, width_out, channels] with the + input, either intact (if kernel_size == 1) or padded (if kernel_size > 1). + """ + kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1), + kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)] + pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1] + pad_beg = [pad_total[0] // 2, pad_total[1] // 2] + pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]] + padded_inputs = tf.pad(inputs, [[0, 0], [pad_beg[0], pad_end[0]], + [pad_beg[1], pad_end[1]], [0, 0]]) + return padded_inputs + + +def mobilenet_v1_base(inputs, + final_endpoint='Conv2d_13_pointwise', + min_depth=8, + depth_multiplier=1.0, + conv_defs=None, + output_stride=None, + use_explicit_padding=False, + scope=None): + """Mobilenet v1. + + Args: + inputs: a tensor of shape [batch_size, height, width, channels]. + final_endpoint: specifies the endpoint to construct the network up to. 
It + can be one of ['Conv2d_0', 'Conv2d_1_pointwise', 'Conv2d_2_pointwise', + 'Conv2d_3_pointwise', 'Conv2d_4_pointwise', 'Conv2d_5'_pointwise, + 'Conv2d_6_pointwise', 'Conv2d_7_pointwise', 'Conv2d_8_pointwise', + 'Conv2d_9_pointwise', 'Conv2d_10_pointwise', 'Conv2d_11_pointwise', + 'Conv2d_12_pointwise', 'Conv2d_13_pointwise']. + min_depth: Minimum depth value (number of channels) for all convolution ops. + Enforced when depth_multiplier < 1, and not an active constraint when + depth_multiplier >= 1. + depth_multiplier: Float multiplier for the depth (number of channels) + for all convolution ops. The value must be greater than zero. Typical + usage will be to set this value in (0, 1) to reduce the number of + parameters or computation cost of the model. + conv_defs: A list of ConvDef namedtuples specifying the net architecture. + output_stride: An integer that specifies the requested ratio of input to + output spatial resolution. If not None, then we invoke atrous convolution + if necessary to prevent the network from reducing the spatial resolution + of the activation maps. Allowed values are 8 (accurate fully convolutional + mode), 16 (fast fully convolutional mode), 32 (classification mode). + use_explicit_padding: Use 'VALID' padding for convolutions, but prepad + inputs so that the output dimensions are the same as if 'SAME' padding + were used. + scope: Optional variable_scope. + + Returns: + tensor_out: output tensor corresponding to the final_endpoint. + end_points: a set of activations for external use, for example summaries or + losses. + + Raises: + ValueError: if final_endpoint is not set to one of the predefined values, + or depth_multiplier <= 0, or the target output_stride is not + allowed. + """ + depth = lambda d: max(int(d * depth_multiplier), min_depth) + end_points = OrderedDict() + + # Used to find thinned depths for each layer. + if depth_multiplier <= 0: + raise ValueError('depth_multiplier is not greater than zero.') + + if conv_defs is None: + conv_defs = _CONV_DEFS + + if output_stride is not None and output_stride not in [8, 16, 32]: + raise ValueError('Only allowed output_stride values are 8, 16, 32.') + + padding = 'SAME' + if use_explicit_padding: + padding = 'VALID' + with tf.variable_scope(scope, 'MobilenetV1', [inputs]): + with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding=padding): + # The current_stride variable keeps track of the output stride of the + # activations, i.e., the running product of convolution strides up to the + # current network layer. This allows us to invoke atrous convolution + # whenever applying the next convolution would result in the activations + # having output stride larger than the target output_stride. + current_stride = 1 + + # The atrous convolution rate parameter. + rate = 1 + + net = inputs + for i, conv_def in enumerate(conv_defs): + end_point_base = 'Conv2d_%d' % i + + if output_stride is not None and current_stride == output_stride: + # If we have reached the target output_stride, then we need to employ + # atrous convolution with stride=1 and multiply the atrous rate by the + # current unit's stride for use in subsequent layers. 
+          layer_stride = 1
+          layer_rate = rate
+          rate *= conv_def.stride
+        else:
+          layer_stride = conv_def.stride
+          layer_rate = 1
+          current_stride *= conv_def.stride
+
+        if isinstance(conv_def, Conv):
+          end_point = end_point_base
+          if use_explicit_padding:
+            net = _fixed_padding(net, conv_def.kernel)
+          net = slim.conv2d(net, depth(conv_def.depth), conv_def.kernel,
+                            stride=conv_def.stride,
+                            normalizer_fn=slim.batch_norm,
+                            scope=end_point)
+          end_points[end_point] = net
+          if end_point == final_endpoint:
+            return net, end_points
+
+        elif isinstance(conv_def, DepthSepConv):
+          end_point = end_point_base + '_depthwise'
+
+          # By passing filters=None
+          # separable_conv2d produces only a depthwise convolution layer
+          if use_explicit_padding:
+            net = _fixed_padding(net, conv_def.kernel, layer_rate)
+          net = slim.separable_conv2d(net, None, conv_def.kernel,
+                                      depth_multiplier=1,
+                                      stride=layer_stride,
+                                      rate=layer_rate,
+                                      normalizer_fn=slim.batch_norm,
+                                      scope=end_point)
+
+          end_points[end_point] = net
+          if end_point == final_endpoint:
+            return net, end_points
+
+          end_point = end_point_base + '_pointwise'
+
+          net = slim.conv2d(net, depth(conv_def.depth), [1, 1],
+                            stride=1,
+                            normalizer_fn=slim.batch_norm,
+                            scope=end_point)
+
+          end_points[end_point] = net
+          if end_point == final_endpoint:
+            return net, end_points
+        else:
+          # Conv/DepthSepConv namedtuples have no 'ltype' field, so report the
+          # type name instead.
+          raise ValueError('Unknown convolution type %s for layer %d'
+                           % (type(conv_def).__name__, i))
+  raise ValueError('Unknown final endpoint %s' % final_endpoint)
+
+
+def mobilenet_v1(inputs,
+                 num_classes=1000,
+                 dropout_keep_prob=0.999,
+                 is_training=True,
+                 min_depth=8,
+                 depth_multiplier=1.0,
+                 conv_defs=None,
+                 prediction_fn=tf.contrib.layers.softmax,
+                 spatial_squeeze=True,
+                 reuse=None,
+                 scope='MobilenetV1',
+                 global_pool=False,
+                 output_stride=None):
+  """Mobilenet v1 model for classification.
+
+  Args:
+    inputs: a tensor of shape [batch_size, height, width, channels].
+    num_classes: number of predicted classes. If 0 or None, the logits layer
+      is omitted and the input features to the logits layer (before dropout)
+      are returned instead.
+    dropout_keep_prob: the percentage of activation values that are retained.
+    is_training: whether or not the model is being trained.
+    min_depth: Minimum depth value (number of channels) for all convolution ops.
+      Enforced when depth_multiplier < 1, and not an active constraint when
+      depth_multiplier >= 1.
+    depth_multiplier: Float multiplier for the depth (number of channels)
+      for all convolution ops. The value must be greater than zero. Typical
+      usage will be to set this value in (0, 1) to reduce the number of
+      parameters or computation cost of the model.
+    conv_defs: A list of ConvDef namedtuples specifying the net architecture.
+    prediction_fn: a function to get predictions out of logits.
+    spatial_squeeze: if True, logits is of shape [B, C], if False logits is
+      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
+    reuse: whether or not the network and its variables should be reused. To be
+      able to reuse, 'scope' must be given.
+    scope: Optional variable_scope.
+    global_pool: Optional boolean flag to control the avgpooling before the
+      logits layer. If false or unset, pooling is done with a fixed window
+      that reduces default-sized inputs to 1x1, while larger inputs lead to
+      larger outputs. If true, any input size is pooled down to 1x1.
+    output_stride: An integer that specifies the requested ratio of input to
+      output spatial resolution.
If not None, then we invoke atrous convolution
+      if necessary to prevent the network from reducing the spatial resolution
+      of the activation maps. Allowed values are 8 (accurate fully convolutional
+      mode), 16 (fast fully convolutional mode), 32 (classification mode).
+
+  Returns:
+    net: a 2D Tensor with the logits (pre-softmax activations) if num_classes
+      is a non-zero integer, or the non-dropped-out input to the logits layer
+      if num_classes is 0 or None.
+    end_points: a dictionary from components of the network to the corresponding
+      activation.
+
+  Raises:
+    ValueError: Input rank is invalid.
+  """
+  input_shape = inputs.get_shape().as_list()
+  if len(input_shape) != 4:
+    raise ValueError('Invalid input tensor rank, expected 4, was: %d' %
+                     len(input_shape))
+
+  with tf.variable_scope(scope, 'MobilenetV1', [inputs], reuse=reuse) as scope:
+    with slim.arg_scope([slim.batch_norm, slim.dropout],
+                        is_training=is_training):
+      net, end_points = mobilenet_v1_base(inputs, scope=scope,
+                                          min_depth=min_depth,
+                                          depth_multiplier=depth_multiplier,
+                                          conv_defs=conv_defs,
+                                          output_stride=output_stride)
+      with tf.variable_scope('Logits'):
+        if global_pool:
+          # Global average pooling.
+          net = tf.reduce_mean(net, [1, 2], keep_dims=True, name='global_pool')
+          end_points['global_pool'] = net
+        else:
+          # Pooling with a fixed kernel size.
+          kernel_size = net_utils.reduced_kernel_size_for_small_input(net, [7, 7])
+          net = slim.avg_pool2d(net, kernel_size, padding='VALID',
+                                scope='AvgPool_1a')
+          end_points['AvgPool_1a'] = net
+        if not num_classes:
+          return net, end_points
+        # 1 x 1 x 1024
+        net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
+        logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
+                             normalizer_fn=None, scope='Conv2d_1c_1x1')
+      if spatial_squeeze:
+        logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
+      end_points['Logits'] = logits
+      if prediction_fn:
+        end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
+  return logits, end_points
+
+mobilenet_v1.default_image_size = 224
+
+
+def wrapped_partial(func, *args, **kwargs):
+  partial_func = functools.partial(func, *args, **kwargs)
+  functools.update_wrapper(partial_func, func)
+  return partial_func
+
+
+mobilenet_v1_075 = wrapped_partial(mobilenet_v1, depth_multiplier=0.75)
+mobilenet_v1_050 = wrapped_partial(mobilenet_v1, depth_multiplier=0.50)
+mobilenet_v1_025 = wrapped_partial(mobilenet_v1, depth_multiplier=0.25)
+
+
+def mobilenet_v1_arg_scope(is_training=True,
+                           weight_decay=0.00004,
+                           stddev=0.09,
+                           regularize_depthwise=False,
+                           batch_norm_decay=0.9997,
+                           batch_norm_epsilon=0.001):
+  """Defines the default MobilenetV1 arg scope.
+
+  Args:
+    is_training: Whether or not we're training the model. If this is set to
+      None, the parameter is not added to the batch_norm arg_scope.
+    weight_decay: The weight decay to use for regularizing the model.
+    stddev: The standard deviation of the truncated normal weight initializer.
+    regularize_depthwise: Whether or not to apply regularization on depthwise.
+    batch_norm_decay: Decay for batch norm moving average.
+    batch_norm_epsilon: Small float added to variance to avoid dividing by zero
+      in batch norm.
+
+  Returns:
+    An `arg_scope` to use for the mobilenet v1 model.
+  """
+  batch_norm_params = {
+      'center': True,
+      'scale': True,
+      'decay': batch_norm_decay,
+      'epsilon': batch_norm_epsilon,
+  }
+  if is_training is not None:
+    batch_norm_params['is_training'] = is_training
+
+  # Set weight_decay for weights in Conv and DepthSepConv layers.
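+  # Usage sketch:
+  #
+  #   with slim.arg_scope(mobilenet_v1_arg_scope(is_training=True)):
+  #     logits, end_points = mobilenet_v1(images, num_classes=1000)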
+  weights_init = tf.truncated_normal_initializer(stddev=stddev)
+  regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
+  if regularize_depthwise:
+    depthwise_regularizer = regularizer
+  else:
+    depthwise_regularizer = None
+  with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
+                      weights_initializer=weights_init,
+                      activation_fn=tf.nn.relu6, normalizer_fn=slim.batch_norm):
+    with slim.arg_scope([slim.batch_norm], **batch_norm_params):
+      with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
+        with slim.arg_scope([slim.separable_conv2d],
+                            weights_regularizer=depthwise_regularizer) as sc:
+          return sc
+
+
+def get_scopes_of_levels(scope,
+                         conv_defs=None,
+                         with_logits=True):
+  """Returns variable scopes grouped by feature-map level.
+
+  Args:
+    scope: scope name for mobilenet_v1.
+    conv_defs: A list of ConvDef namedtuples specifying the net architecture.
+    with_logits: whether to include the classification layer scope.
+
+  Returns:
+    A list of variable scope lists, ordered from the deepest level (preceded
+    by the logits scope if with_logits is True) to the shallowest.
+  """
+  if conv_defs is None:
+    conv_defs = _CONV_DEFS
+
+  scopes_of_levels_reverse = []
+  scopes_of_curr_level = []
+
+  for i, conv_def in enumerate(conv_defs):
+    end_point_base = scope + '/Conv2d_%d' % i
+    if conv_def.stride > 1:
+      if len(scopes_of_curr_level) > 0:
+        scopes_of_levels_reverse.append(scopes_of_curr_level)
+        scopes_of_curr_level = []
+
+    if isinstance(conv_def, Conv):
+      scopes_of_curr_level.append(end_point_base)
+
+    elif isinstance(conv_def, DepthSepConv):
+      scopes_of_curr_level.extend([
+          end_point_base + '_depthwise',
+          end_point_base + '_pointwise'
+      ])
+    else:
+      # Conv/DepthSepConv namedtuples have no 'ltype' field, so report the
+      # type name instead.
+      raise ValueError('Unknown convolution type %s for layer %d'
+                       % (type(conv_def).__name__, i))
+  if len(scopes_of_curr_level) > 0:
+    scopes_of_levels_reverse.append(scopes_of_curr_level)
+
+  if with_logits:
+    return [[scope + '/Logits']] + list(reversed(scopes_of_levels_reverse))
+  else:
+    return list(reversed(scopes_of_levels_reverse))
diff --git a/easy_rec/python/vision_backbones/nets/resnet_utils.py b/easy_rec/python/vision_backbones/nets/resnet_utils.py
new file mode 100644
index 000000000..c7ef94a76
--- /dev/null
+++ b/easy_rec/python/vision_backbones/nets/resnet_utils.py
@@ -0,0 +1,330 @@
+# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""Contains building blocks for various versions of Residual Networks.
+
+Residual networks (ResNets) were proposed in:
+  Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
+  Deep Residual Learning for Image Recognition. arXiv:1512.03385, 2015
+
+More variants were introduced in:
+  Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
+  Identity Mappings in Deep Residual Networks. arXiv: 1603.05027, 2016
+
+We can obtain different ResNet variants by changing the network depth, width,
+and form of residual unit. This module implements the infrastructure for
+building them. Concrete ResNet units and full ResNet networks are implemented in
+the accompanying resnet_v1.py and resnet_v2.py modules.
+
+Compared to https://github.com/KaimingHe/deep-residual-networks, in the current
+implementation we subsample the output activations in the last residual unit of
+each block, instead of subsampling the input activations in the first residual
+unit of each block. The two implementations give identical results but our
+implementation is more memory efficient.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+import tensorflow as tf
+
+slim = tf.contrib.slim
+
+
+class Block(collections.namedtuple('Block', ['scope', 'unit_fn', 'args'])):
+  """A named tuple describing a ResNet block.
+
+  Its parts are:
+    scope: The scope of the `Block`.
+    unit_fn: The ResNet unit function which takes as input a `Tensor` and
+      returns another `Tensor` with the output of the ResNet unit.
+    args: A list of length equal to the number of units in the `Block`. The list
+      contains one (depth, depth_bottleneck, stride) tuple for each unit in the
+      block to serve as argument to unit_fn.
+  """
+
+
+def shortcut(inputs,
+             depth,
+             stride,
+             avg_down=False,
+             use_bounded_activations=False,
+             scope='shortcut'):
+  """Residual unit shortcut network.
+
+  This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
+  its definition.
+
+  Args:
+    inputs: A tensor of size [batch, height, width, channels].
+    depth: The depth of the ResNet unit output.
+    stride: The ResNet unit's stride. Determines the amount of downsampling of
+      the unit's output compared to its input.
+    avg_down: Whether to use average pooling for the projection shortcut when
+      downsampling between stages (default False).
+    use_bounded_activations: Whether or not to use bounded activations. Bounded
+      activations better lend themselves to quantized inference.
+    scope: Optional variable_scope.
+
+  Returns:
+    The ResNet shortcut network output.
+  """
+  depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
+  if avg_down:
+    x = inputs
+    if stride > 1:
+      x = slim.avg_pool2d(x, stride, stride=stride, padding='SAME',
+                          scope=scope + '_avg')
+    if depth != depth_in:
+      x = slim.conv2d(
+          x,
+          depth, [1, 1],
+          activation_fn=tf.nn.relu6 if use_bounded_activations else None,
+          scope=scope)
+  else:
+    if depth == depth_in:
+      x = subsample(inputs, stride, scope)
+    else:
+      x = slim.conv2d(
+          inputs,
+          depth, [1, 1],
+          stride=stride,
+          activation_fn=tf.nn.relu6 if use_bounded_activations else None,
+          scope=scope)
+  return x
+
+
+def subsample(inputs, factor, scope=None):
+  """Subsamples the input along the spatial dimensions.
+
+  Args:
+    inputs: A `Tensor` of size [batch, height_in, width_in, channels].
+    factor: The subsampling factor.
+    scope: Optional variable_scope.
+
+  Returns:
+    output: A `Tensor` of size [batch, height_out, width_out, channels] with the
+      input, either intact (if factor == 1) or subsampled (if factor > 1).
+  """
+  if factor == 1:
+    return inputs
+  else:
+    return slim.max_pool2d(inputs, [1, 1], stride=factor, padding='SAME',
+                           scope=scope)
+
+
+def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None,
+                **kwargs):
+  """Strided 2-D convolution with 'SAME' padding.
+
+  When stride > 1, then we do explicit zero-padding, followed by conv2d with
+  'VALID' padding.
+ + Note that + + net = conv2d_same(inputs, num_outputs, 3, stride=stride) + + is equivalent to + + net = slim.conv2d(inputs, num_outputs, 3, stride=1, padding='SAME') + net = subsample(net, factor=stride) + + whereas + + net = slim.conv2d(inputs, num_outputs, 3, stride=stride, padding='SAME') + + is different when the input's height or width is even, which is why we add the + current function. For more details, see ResnetUtilsTest.testConv2DSameEven(). + + Args: + inputs: A 4-D tensor of size [batch, height_in, width_in, channels]. + num_outputs: An integer, the number of output filters. + kernel_size: An int with the kernel_size of the filters. + stride: An integer, the output stride. + rate: An integer, rate for atrous convolution. + scope: Scope. + + Returns: + output: A 4-D tensor of size [batch, height_out, width_out, channels] with + the convolution output. + """ + if stride == 1: + return slim.conv2d(inputs, num_outputs, kernel_size, stride=1, rate=rate, + padding='SAME', scope=scope, **kwargs) + else: + kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1) + pad_total = kernel_size_effective - 1 + pad_beg = pad_total // 2 + pad_end = pad_total - pad_beg + inputs = tf.pad(inputs, + [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) + return slim.conv2d(inputs, num_outputs, kernel_size, stride=stride, + rate=rate, padding='VALID', scope=scope, **kwargs) + + +@slim.add_arg_scope +def stack_blocks_dense(net, blocks, + output_stride=None, + store_non_strided_activations=False, + net_mask=None, + outputs_collections=None): + """Stacks ResNet `Blocks` and controls output feature density. + + First, this function creates scopes for the ResNet in the form of + 'block_name/unit_1', 'block_name/unit_2', etc. + + Second, this function allows the user to explicitly control the ResNet + output_stride, which is the ratio of the input to output spatial resolution. + This is useful for dense prediction tasks such as semantic segmentation or + object detection. + + Most ResNets consist of 4 ResNet blocks and subsample the activations by a + factor of 2 when transitioning between consecutive ResNet blocks. This results + to a nominal ResNet output_stride equal to 8. If we set the output_stride to + half the nominal network stride (e.g., output_stride=4), then we compute + responses twice. + + Control of the output feature density is implemented by atrous convolution. + + Args: + net: A `Tensor` of size [batch, height, width, channels]. + blocks: A list of length equal to the number of ResNet `Blocks`. Each + element is a ResNet `Block` object describing the units in the `Block`. + output_stride: If `None`, then the output will be computed at the nominal + network stride. If output_stride is not `None`, it specifies the requested + ratio of input to output spatial resolution, which needs to be equal to + the product of unit strides from the start up to some level of the ResNet. + For example, if the ResNet employs units with strides 1, 2, 1, 3, 4, 1, + then valid values for the output_stride are 1, 2, 6, 24 or None (which + is equivalent to output_stride=24). + store_non_strided_activations: If True, we compute non-strided (undecimated) + activations at the last unit of each block and store them in the + `outputs_collections` before subsampling them. This gives us access to + higher resolution intermediate activations which are useful in some + dense prediction problems but increases 4x the computation and memory cost + at the last unit of each block. 
+ net_mask: inputs valid mask. A tensor of size [batch, height, width] + outputs_collections: Collection to add the ResNet block outputs. + + Returns: + net: Output tensor with stride equal to the specified output_stride. + + Raises: + ValueError: If the target output_stride is not valid. + """ + # The current_stride variable keeps track of the effective stride of the + # activations. This allows us to invoke atrous convolution whenever applying + # the next residual unit would result in the activations having stride larger + # than the target output_stride. + current_stride = 1 + + # The atrous convolution rate parameter. + rate = 1 + + for block in blocks: + with tf.variable_scope(block.scope, 'block', [net]) as sc: + block_stride = 1 + for i, unit in enumerate(block.args): + # legacy model is implemented as a stride in the last unit + if store_non_strided_activations and i == len(block.args) - 1: + # Move stride from the block's last unit to the end of the block. + block_stride = unit.get('stride', 1) + unit = dict(unit, stride=1) + + with tf.variable_scope('unit_%d' % (i + 1), values=[net]): + # If we have reached the target output_stride, then we need to employ + # atrous convolution with stride=1 and multiply the atrous rate by the + # current unit's stride for use in subsequent layers. + if output_stride is not None and current_stride == output_stride: + net = block.unit_fn(net, inputs_mask=net_mask, rate=rate, **dict(unit, stride=1)) + rate *= unit.get('stride', 1) + + else: + net = block.unit_fn(net, inputs_mask=net_mask, rate=1, **unit) + stride = unit.get('stride', 1) + current_stride *= stride + if net_mask is not None and stride > 1: + net_mask = net_mask[:, ::stride, ::stride, :] + if output_stride is not None and current_stride > output_stride: + raise ValueError('The target output_stride cannot be reached.') + + # Collect activations at the block's end before performing subsampling. + net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net) + + # Subsampling of the block's output activations. + if output_stride is not None and current_stride == output_stride: + rate *= block_stride + else: + net = subsample(net, block_stride) + if net_mask is not None and block_stride > 1: + net_mask = net_mask[:, ::block_stride, ::block_stride, :] + current_stride *= block_stride + if output_stride is not None and current_stride > output_stride: + raise ValueError('The target output_stride cannot be reached.') + + if output_stride is not None and current_stride != output_stride: + raise ValueError('The target output_stride cannot be reached.') + + return net + + +def resnet_arg_scope(weight_decay=0.0001, + batch_norm_decay=0.997, + batch_norm_epsilon=1e-5, + batch_norm_scale=True, + activation_fn=tf.nn.relu, + use_batch_norm=True): + """Defines the default ResNet arg scope. + + TODO(gpapan): The batch-normalization related default values above are + appropriate for use in conjunction with the reference ResNet models + released at https://github.com/KaimingHe/deep-residual-networks. When + training ResNets from scratch, they might need to be tuned. + + Args: + weight_decay: The weight decay to use for regularizing the model. + batch_norm_decay: The moving average decay when estimating layer activation + statistics in batch normalization. + batch_norm_epsilon: Small constant to prevent division by zero when + normalizing activations by their variance in batch normalization. 
+ batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the + activations in the batch normalization layer. + activation_fn: The activation function which is used in ResNet. + use_batch_norm: Whether or not to use batch normalization. + + Returns: + An `arg_scope` to use for the resnet models. + """ + batch_norm_params = { + 'decay': batch_norm_decay, + 'epsilon': batch_norm_epsilon, + 'scale': batch_norm_scale, + 'updates_collections': tf.GraphKeys.UPDATE_OPS, + 'fused': None, # Use fused batch norm if possible. + } + + with slim.arg_scope( + [slim.conv2d], + weights_regularizer=slim.l2_regularizer(weight_decay), + weights_initializer=slim.variance_scaling_initializer(), + activation_fn=activation_fn, + normalizer_fn=slim.batch_norm if use_batch_norm else None, + normalizer_params=batch_norm_params): + with slim.arg_scope([slim.batch_norm], **batch_norm_params): + # The following implies padding='SAME' for pool1, which makes feature + # alignment easier for dense prediction tasks. This is also used in + # https://github.com/facebook/fb.resnet.torch. However the accompanying + # code of 'Deep Residual Learning for Image Recognition' uses + # padding='VALID' for pool1. You can switch to that choice by setting + # slim.arg_scope([slim.max_pool2d], padding='VALID'). + with slim.arg_scope([slim.max_pool2d], padding='SAME') as arg_sc: + return arg_sc diff --git a/easy_rec/python/vision_backbones/nets/resnet_v1.py b/easy_rec/python/vision_backbones/nets/resnet_v1.py new file mode 100644 index 000000000..ee4dd37cc --- /dev/null +++ b/easy_rec/python/vision_backbones/nets/resnet_v1.py @@ -0,0 +1,1207 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains definitions for the original form of Residual Networks. + +The 'v1' residual networks (ResNets) implemented in this module were proposed +by: +[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun + Deep Residual Learning for Image Recognition. arXiv:1512.03385 + +Other variants were introduced in: +[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun + Identity Mappings in Deep Residual Networks. arXiv: 1603.05027 + +The networks defined in this module utilize the bottleneck building block of +[1] with projection shortcuts only for increasing depths. They employ batch +normalization *after* every weight layer. This is the architecture used by +MSRA in the Imagenet and MSCOCO 2016 competition models ResNet-101 and +ResNet-152. See [2; Fig. 1a] for a comparison between the current 'v1' +architecture and the alternative 'v2' architecture of [2] which uses batch +normalization *before* every weight layer in the so-called full pre-activation +units. 
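+
+This copy also supports batches of zero-padded images: the optional
+inputs_true_shape argument carries each image's valid shape, from which an
+internal mask is derived and propagated so that mask-aware layers (currently
+the squeeze-and-excitation pooling) ignore the padded region. A minimal
+sketch, where `true_shapes` is a hypothetical [batch, 3] integer tensor of
+per-image [height, width, channels]:
+
+   # inputs has shape [batch, max_height, max_width, 3], zero-padded
+   with slim.arg_scope(resnet_v1.resnet_arg_scope()):
+      net, end_points = resnet_v1.se_resnet_v1_50(inputs,
+                                                  global_pool=True,
+                                                  inputs_true_shape=true_shapes)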
+
+Typical use:
+
+   from tensorflow.contrib.slim.nets import resnet_v1
+
+ResNet-101 for image classification into 1000 classes:
+
+   # inputs has shape [batch, 224, 224, 3]
+   with slim.arg_scope(resnet_v1.resnet_arg_scope()):
+      net, end_points = resnet_v1.resnet_v1_101(inputs, 1000, is_training=False)
+
+ResNet-101 for semantic segmentation into 21 classes:
+
+   # inputs has shape [batch, 513, 513, 3]
+   with slim.arg_scope(resnet_v1.resnet_arg_scope()):
+      net, end_points = resnet_v1.resnet_v1_101(inputs,
+                                                21,
+                                                is_training=False,
+                                                global_pool=False,
+                                                output_stride=16)
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import tensorflow as tf
+from collections import OrderedDict
+
+from easy_rec.python.vision_backbones.nets import resnet_utils
+from easy_rec.python.vision_backbones import net_utils
+
+resnet_arg_scope = resnet_utils.resnet_arg_scope
+slim = tf.contrib.slim
+
+
+def image_mask(valid_shape,
+               max_shape=None,
+               dtype=tf.bool,
+               name=None):
+  """Returns an image mask tensor.
+
+  Args:
+    valid_shape: integer tensor of shape `(batch_size, 2 or 3)` giving the
+      valid (unpadded) shape of each image in the batch.
+    max_shape: integer tensor of shape `(2 or 3)` giving the spatial size of
+      the returned mask; if None, the per-dimension maximum of `valid_shape`
+      over the batch is used.
+    dtype: output type of the resulting tensor.
+    name: name of the op.
+
+  Returns:
+    A mask tensor of shape `(batch_size, max_shape[0], max_shape[1])`, cast to
+    the specified dtype.
+  """
+  with tf.name_scope(name or "ImageMask"):
+    if max_shape is None:
+      max_shape = tf.reduce_max(valid_shape, axis=0)
+
+    x = tf.range(max_shape[1])
+    y = tf.range(max_shape[0])
+    X, Y = tf.meshgrid(x, y)
+
+    valid_shape_x = valid_shape[:, 1]
+    X = X[tf.newaxis, :, :] < valid_shape_x[:, tf.newaxis, tf.newaxis]
+
+    valid_shape_y = valid_shape[:, 0]
+    Y = Y[tf.newaxis, :, :] < valid_shape_y[:, tf.newaxis, tf.newaxis]
+
+    return tf.cast(tf.logical_and(X, Y), dtype=dtype)
+
+
+def combined_static_and_dynamic_shape(tensor):
+  """Returns a list containing static and dynamic values for the dimensions.
+
+  Returns a list of static and dynamic values for shape dimensions. This is
+  useful to preserve static shapes when available in reshape operation.
+
+  Args:
+    tensor: A tensor of any type.
+
+  Returns:
+    A list of size tensor.shape.ndims containing integers or a scalar tensor.
+  """
+  static_tensor_shape = tensor.shape.as_list()
+  dynamic_tensor_shape = tf.shape(tensor)
+  combined_shape = []
+  for index, dim in enumerate(static_tensor_shape):
+    if dim is not None:
+      combined_shape.append(dim)
+    else:
+      combined_shape.append(dynamic_tensor_shape[index])
+  return combined_shape
+
+
+@slim.add_arg_scope
+def squeeze_and_excitation_2d(inputs,
+                              se_rate=16,
+                              inputs_mask=None):
+  """Squeeze-and-excitation block.
+
+  Ref: Hu, J., Shen, L., & Sun, G. (2017). Squeeze-and-Excitation Networks.
+  CoRR.
+
+  Args:
+    inputs: input tensor of size [batch_size, height, width, channels].
+    se_rate: squeeze-and-excitation reduction rate.
+    inputs_mask: valid mask for `inputs`, of size
+      [batch_size, height, width, 1] and the same dtype as `inputs`.
+ + Returns: + output tensor with same shape as inputs + """ + + input_shape = combined_static_and_dynamic_shape(inputs) + input_c = input_shape[-1] + + if inputs_mask is not None: + assert inputs_mask.shape.ndims == inputs.shape.ndims + assert inputs_mask.dtype == inputs.dtype + with tf.variable_scope('global_pool'): + valid_sum = tf.reduce_sum(inputs_mask, axis=[1, 2], keep_dims=True) + input = inputs * inputs_mask + conv_dw = tf.reduce_sum(input, + axis=[1, 2], + keep_dims=True) / valid_sum + else: + conv_dw = tf.reduce_mean(inputs, axis=[1, 2], keepdims=True, name='global_pool') + conv_dw = slim.conv2d(inputs=conv_dw, + num_outputs=input_c // se_rate, + kernel_size=(1, 1), + activation_fn=tf.nn.relu, + normalizer_fn=None, + scope="conv_down") + conv_up = slim.conv2d(inputs=conv_dw, + num_outputs=input_c, + kernel_size=(1, 1), + activation_fn=tf.sigmoid, + normalizer_fn=None, + scope='conv_up') + conv = tf.multiply(inputs, conv_up, name='excite') + return conv + +class NoOpScope(object): + """No-op context manager.""" + + def __enter__(self): + return None + + def __exit__(self, exc_type, exc_value, traceback): + return False + + +@slim.add_arg_scope +def basic(inputs, + depth, + stride, + rate=1, + avg_down=False, + se_rate=None, + inputs_mask=None, + outputs_collections=None, + scope=None, + use_bounded_activations=False): + """Basic residual unit variant with BN after convolutions. + + This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for + its definition. + + Args: + inputs: A tensor of size [batch, height, width, channels]. + depth: The depth of the ResNet unit output. + stride: The ResNet unit's stride. Determines the amount of downsampling of + the units output compared to its input. + rate: An integer, rate for atrous convolution. + avg_down: bool, default False + Whether to use average pooling for projection skip connection between stages/downsample. + se_rate: reduce rate for squeeze_and_excitation_2d, if None, not use SE. + inputs_mask: inputs valid mask. A tensor of size [batch, height, width]. + outputs_collections: Collection to add the ResNet unit output. + scope: Optional variable_scope. + use_bounded_activations: Whether or not to use bounded activations. Bounded + activations better lend themselves to quantized inference. + + Returns: + The ResNet unit's output. + """ + with tf.variable_scope(scope, 'basic_v1', [inputs]) as sc: + shortcut = resnet_utils.shortcut(inputs, depth=depth, stride=stride, avg_down=avg_down, + use_bounded_activations=use_bounded_activations, + scope='shortcut') + residual = resnet_utils.conv2d_same(inputs, depth, 3, stride=stride, rate=rate, scope='conv1') + residual = resnet_utils.conv2d_same(residual, depth, 3, stride=1, rate=rate, + activation_fn=None, scope='conv2') + + if se_rate is not None: + residual = squeeze_and_excitation_2d(inputs=residual, + se_rate=se_rate, + inputs_mask=inputs_mask) + + if use_bounded_activations: + # Use clip_by_value to simulate bandpass activation. + residual = tf.clip_by_value(residual, -6.0, 6.0) + output = tf.nn.relu6(shortcut + residual) + else: + output = tf.nn.relu(shortcut + residual) + + return slim.utils.collect_named_outputs(outputs_collections, + sc.name, + output) + + +@slim.add_arg_scope +def bottleneck(inputs, + depth, + depth_bottleneck, + stride, + rate=1, + middle_stride=True, + avg_down=False, + se_rate=None, + inputs_mask=None, + outputs_collections=None, + scope=None, + use_bounded_activations=False): + """Bottleneck residual unit variant with BN after convolutions. 
+ + This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for + its definition. Note that we use here the bottleneck variant which has an + extra bottleneck layer. + + Args: + inputs: A tensor of size [batch, height, width, channels]. + depth: The depth of the ResNet unit output. + depth_bottleneck: The depth of the bottleneck layers. + stride: The ResNet unit's stride. Determines the amount of downsampling of + the units output compared to its input. + rate: An integer, rate for atrous convolution. + middle_stride: bool, default True, Whether stride is set on 3x3 layer or not. + avg_down: bool, default False + Whether to use average pooling for projection skip connection between stages/downsample. + se_rate: reduce rate for squeeze_and_excitation_2d, if None, not use SE. + inputs_mask: inputs valid mask. A tensor of size [batch, height, width]. + outputs_collections: Collection to add the ResNet unit output. + scope: Optional variable_scope. + use_bounded_activations: Whether or not to use bounded activations. Bounded + activations better lend themselves to quantized inference. + + Returns: + The ResNet unit's output. + """ + with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc: + residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1 if middle_stride else stride, + scope='conv1') + residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride=stride if middle_stride else 1, + rate=rate, scope='conv2') + residual = slim.conv2d(residual, depth, [1, 1], stride=1, + activation_fn=None, scope='conv3') + shortcut_depth = slim.utils.last_dimension(residual.get_shape(), min_rank=4) + shortcut = resnet_utils.shortcut(inputs, depth=shortcut_depth, stride=stride, avg_down=avg_down, + scope='shortcut') + + + if se_rate is not None: + residual = squeeze_and_excitation_2d(inputs=residual, + se_rate=se_rate, + inputs_mask=inputs_mask) + + if use_bounded_activations: + # Use clip_by_value to simulate bandpass activation. + residual = tf.clip_by_value(residual, -6.0, 6.0) + output = tf.nn.relu6(shortcut + residual) + else: + output = tf.nn.relu(shortcut + residual) + + return slim.utils.collect_named_outputs(outputs_collections, + sc.name, + output) + + +def resnet_v1(inputs, + blocks, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + include_root_block=True, + spatial_squeeze=True, + store_non_strided_activations=False, + deep_stem=False, + inputs_true_shape=None, + reuse=None, + scope=None): + """Generator for v1 ResNet models. + + This function generates a family of ResNet v1 models. See the resnet_v1_*() + methods for specific model instantiations, obtained by selecting different + block instantiations that produce ResNets of various depths. + + Training for image classification on Imagenet is usually done with [224, 224] + inputs, resulting in [7, 7] feature maps at the output of the last ResNet + block for the ResNets defined in [1] that have nominal stride equal to 32. + However, for dense prediction tasks we advise that one uses inputs with + spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In + this case the feature maps at the ResNet output will have spatial shape + [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1] + and corners exactly aligned with the input image corners, which greatly + facilitates alignment of the features to the image. Using as input [225, 225] + images results in [8, 8] feature maps at the output of the last ResNet block. 
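+
+  As a worked instance of the arithmetic above: with output_stride=32, a
+  [321, 321] input yields feature maps of spatial shape
+  [(321 - 1) / 32 + 1, (321 - 1) / 32 + 1] = [11, 11], with corner features
+  aligned to the input image corners.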
+
+  For dense prediction tasks, the ResNet needs to run in fully-convolutional
+  (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
+  have nominal stride equal to 32 and a good choice in FCN mode is to use
+  output_stride=16 in order to increase the density of the computed features at
+  small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
+
+  Args:
+    inputs: A tensor of size [batch, height_in, width_in, channels].
+    blocks: A list of length equal to the number of ResNet blocks. Each element
+      is a resnet_utils.Block object describing the units in the block.
+    num_classes: Number of predicted classes for classification tasks.
+      If 0 or None, we return the features before the logit layer.
+    is_training: whether batch_norm layers are in training mode. If this is set
+      to None, the callers can specify slim.batch_norm's is_training parameter
+      from an outer slim.arg_scope.
+    global_pool: If True, we perform global average pooling before computing the
+      logits. Set to True for image classification, False for dense prediction,
+      None to skip average pooling altogether.
+    output_stride: If None, then the output will be computed at the nominal
+      network stride. If output_stride is not None, it specifies the requested
+      ratio of input to output spatial resolution.
+    include_root_block: If True, include the initial convolution followed by
+      max-pooling, if False excludes it.
+    spatial_squeeze: if True, logits is of shape [B, C], if false logits is
+      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
+      To use this parameter, the input images must be smaller than 300x300
+      pixels, in which case the output logit layer does not contain spatial
+      information and can be removed.
+    store_non_strided_activations: If True, we compute non-strided (undecimated)
+      activations at the last unit of each block and store them in the
+      `outputs_collections` before subsampling them. This gives us access to
+      higher resolution intermediate activations which are useful in some
+      dense prediction problems but increases 4x the computation and memory cost
+      at the last unit of each block.
+    deep_stem: bool, default False
+      Whether to replace the 7x7 conv1 with three 3x3 convolution layers.
+    inputs_true_shape: the true shape of each example in the batch; an integer
+      tensor of shape [batch, 3].
+    reuse: whether or not the network and its variables should be reused. To be
+      able to reuse 'scope' must be given.
+    scope: Optional variable_scope.
+
+  Returns:
+    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
+      If global_pool is False, then height_out and width_out are reduced by a
+      factor of output_stride compared to the respective height_in and width_in,
+      else both height_out and width_out equal one. If num_classes is 0 or None,
+      then net is the output of the last ResNet block, potentially after global
+      average pooling. If num_classes is a non-zero integer, net contains the
+      pre-softmax activations.
+    end_points: A dictionary from components of the network to the corresponding
+      activation.
+
+  Raises:
+    ValueError: If the target output_stride is not valid.
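+
+  Example (a sketch; resnet_v1_bottleneck_block is the helper defined later
+  in this module, and the block arguments are illustrative):
+
+    blocks = [
+        resnet_v1_bottleneck_block('block1', base_depth=64, num_units=3,
+                                   stride=1),
+        resnet_v1_bottleneck_block('block2', base_depth=128, num_units=4,
+                                   stride=2),
+    ]
+    net, end_points = resnet_v1(inputs, blocks, num_classes=None,
+                                is_training=False, global_pool=True)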
+ """ + with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc: + end_points_collection = sc.original_name_scope + '_end_points' + with slim.arg_scope([slim.conv2d, bottleneck, basic, + resnet_utils.stack_blocks_dense], + outputs_collections=end_points_collection): + with (slim.arg_scope([slim.batch_norm], is_training=is_training) + if is_training is not None else NoOpScope()): + net = inputs + net_mask = None + if inputs_true_shape is not None: + net_mask = image_mask( + inputs_true_shape, dtype=tf.float32)[:, :, :, tf.newaxis] + if include_root_block: + if output_stride is not None: + if output_stride % 4 != 0: + raise ValueError('The output_stride needs to be a multiple of 4.') + output_stride /= 4 + if deep_stem: + net = resnet_utils.conv2d_same(net, 32, 3, stride=2, scope='conv1') + net = resnet_utils.conv2d_same(net, 32, 3, stride=1, scope='conv2') + net = resnet_utils.conv2d_same(net, 64, 3, stride=1, scope='conv3') + else: + net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1') + net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1') + if net_mask is not None: + net_mask = net_mask[:, ::4, ::4, :] + + net = resnet_utils.stack_blocks_dense(net, blocks, + output_stride, + store_non_strided_activations, + net_mask=net_mask) + # Convert end_points_collection into a dictionary of end_points. + end_points = slim.utils.convert_collection_to_dict( + end_points_collection) + + if global_pool is not None: + # when global pool is not, skip average pooling + if global_pool: + # Global average pooling. + net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True) + end_points['global_pool'] = net + else: + # Pooling with a fixed kernel size. + kernel_size = net_utils.reduced_kernel_size_for_small_input(net, [7, 7]) + net = slim.avg_pool2d(net, kernel_size, padding='VALID', + scope='AvgPool_1a_{}x{}'.format(*kernel_size)) + end_points['AvgPool_1a'] = net + + if num_classes: + net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, + normalizer_fn=None, scope='logits') + end_points[sc.name + '/logits'] = net + if spatial_squeeze: + net = tf.squeeze(net, [1, 2], name='SpatialSqueeze') + end_points[sc.name + '/spatial_squeeze'] = net + end_points['predictions'] = slim.softmax(net, scope='predictions') + return net, end_points + + +resnet_v1.default_image_size = 224 + + +def resnet_v1_legacy_block(scope, base_depth, num_units, stride): + """Helper function for creating a resnet_v1 legacy slim-style bottleneck block, + implemented as a stride in the last unit. + + Args: + scope: The scope of the block. + base_depth: The depth of the bottleneck layer for each unit. + num_units: The number of units in the block. + stride: The stride of the block, implemented as a stride in the last unit. + All other units have stride=1. + Returns: + A resnet_v1 bottleneck block. + """ + return resnet_utils.Block(scope, bottleneck, [{ + 'depth': base_depth * 4, + 'depth_bottleneck': base_depth, + 'stride': 1 + }] * (num_units - 1) + [{ + 'depth': base_depth * 4, + 'depth_bottleneck': base_depth, + 'stride': stride + }]) + + +def resnet_v1_basic_block(scope, + base_depth, + num_units, + stride, + avg_down=False, + se_rate=None): + """Helper function for creating a resnet_v1 basic block. + implemented as a stride in the first unit. + + Args: + scope: The scope of the block. + base_depth: The depth of the bottleneck layer for each unit. + num_units: The number of units in the block. + stride: The stride of the block, implemented as a stride in the first unit. 
+ All other units have stride=1. + avg_down: bool, default False + Whether to use average pooling for projection skip connection between stages/downsample. + se_rate: reduce rate for squeeze_and_excitation_2d, if None, not use SE. + Returns: + A resnet_v1 basic block. + """ + return resnet_utils.Block(scope, basic, [{ + 'depth': base_depth, + 'stride': stride, + 'avg_down': avg_down, + 'se_rate': se_rate + }] + [{ + 'depth': base_depth, + 'stride': 1, + 'avg_down': avg_down, + 'se_rate': se_rate + }] * (num_units - 1)) + + +def resnet_v1_bottleneck_block(scope, + base_depth, + num_units, + stride, + middle_stride=True, + avg_down=False, + se_rate=None): + """Helper function for creating a resnet_v1 bottleneck block. + implemented as a stride in the first unit. + + Args: + scope: The scope of the block. + base_depth: The depth of the bottleneck layer for each unit. + num_units: The number of units in the block. + stride: The stride of the block, implemented as a stride in the first unit. + All other units have stride=1. + middle_stride: bool, default True, Whether stride is set on 3x3 layer or not. + avg_down: bool, default False + Whether to use average pooling for projection skip connection between stages/downsample. + se_rate: reduce rate for squeeze_and_excitation_2d, if None, not use SE. + Returns: + A resnet_v1 bottleneck block. + """ + return resnet_utils.Block(scope, bottleneck, [{ + 'depth': base_depth * 4, + 'depth_bottleneck': base_depth, + 'stride': stride, + 'middle_stride': middle_stride, + 'avg_down': avg_down, + 'se_rate': se_rate + }] + [{ + 'depth': base_depth * 4, + 'depth_bottleneck': base_depth, + 'stride': 1, + 'middle_stride': middle_stride, + 'avg_down': avg_down, + 'se_rate': se_rate + }] * (num_units - 1)) + + +def resnet_v1_50(inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + spatial_squeeze=True, + inputs_true_shape=None, + reuse=None, + include_root_block=True, + block_names=None, + block_kwargs=None, + scope='resnet_v1_50'): + """ResNet-50 model of [1]. See resnet_v1() for arg and return description.""" + block_func = resnet_v1_legacy_block + default_kwargs = OrderedDict([ + ('block1', dict(base_depth=64, num_units=3, stride=2)), + ('block2', dict(base_depth=128, num_units=4, stride=2)), + ('block3', dict(base_depth=256, num_units=6, stride=2)), + ('block4', dict(base_depth=512, num_units=3, stride=1))]) + block_names = block_names if block_names else default_kwargs.keys() + block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names) + blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg)) + for block_name, block_kwarg in zip(block_names, block_kwargs)] + return resnet_v1(inputs, blocks, num_classes, is_training, + global_pool=global_pool, output_stride=output_stride, + include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, + store_non_strided_activations=True, + inputs_true_shape=inputs_true_shape, + reuse=reuse, scope=scope) + + +def resnet_v1_101(inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + spatial_squeeze=True, + inputs_true_shape=None, + reuse=None, + include_root_block=True, + block_names=None, + block_kwargs=None, + scope='resnet_v1_101'): + """ResNet-101 model of [1]. 
See resnet_v1() for arg and return description.""" + block_func = resnet_v1_legacy_block + default_kwargs = OrderedDict([ + ('block1', dict(base_depth=64, num_units=3, stride=2)), + ('block2', dict(base_depth=128, num_units=4, stride=2)), + ('block3', dict(base_depth=256, num_units=23, stride=2)), + ('block4', dict(base_depth=512, num_units=3, stride=1))]) + block_names = block_names if block_names else default_kwargs.keys() + block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names) + blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg)) + for block_name, block_kwarg in zip(block_names, block_kwargs)] + return resnet_v1(inputs, blocks, num_classes, is_training, + global_pool=global_pool, output_stride=output_stride, + include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, + store_non_strided_activations=True, + inputs_true_shape=inputs_true_shape, + reuse=reuse, scope=scope) + + +def resnet_v1_152(inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + spatial_squeeze=True, + inputs_true_shape=None, + reuse=None, + include_root_block=True, + block_names=None, + block_kwargs=None, + scope='resnet_v1_152'): + """ResNet-152 model of [1]. See resnet_v1() for arg and return description.""" + block_func = resnet_v1_legacy_block + default_kwargs = OrderedDict([ + ('block1', dict(base_depth=64, num_units=3, stride=2)), + ('block2', dict(base_depth=128, num_units=8, stride=2)), + ('block3', dict(base_depth=256, num_units=36, stride=2)), + ('block4', dict(base_depth=512, num_units=3, stride=1))]) + block_names = block_names if block_names else default_kwargs.keys() + block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names) + blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg)) + for block_name, block_kwarg in zip(block_names, block_kwargs)] + return resnet_v1(inputs, blocks, num_classes, is_training, + global_pool=global_pool, output_stride=output_stride, + include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, + store_non_strided_activations=True, + inputs_true_shape=inputs_true_shape, + reuse=reuse, scope=scope) + + +def resnet_v1a_18(inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + spatial_squeeze=True, + inputs_true_shape=None, + reuse=None, + include_root_block=True, + block_names=None, + block_kwargs=None, + scope='resnet_v1a_18'): + """ResNet-18 A-variant model of [1]. 
See resnet_v1() for arg and return description.""" + block_func = resnet_v1_basic_block + default_kwargs = OrderedDict([ + ('block1', dict(base_depth=64, num_units=2, stride=1)), + ('block2', dict(base_depth=128, num_units=2, stride=2)), + ('block3', dict(base_depth=256, num_units=2, stride=2)), + ('block4', dict(base_depth=512, num_units=2, stride=2))]) + block_names = block_names if block_names else default_kwargs.keys() + block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names) + blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg)) + for block_name, block_kwarg in zip(block_names, block_kwargs)] + return resnet_v1(inputs, blocks, num_classes, is_training, + global_pool=global_pool, output_stride=output_stride, + include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, + store_non_strided_activations=True, + inputs_true_shape=inputs_true_shape, + reuse=reuse, scope=scope) + + +def resnet_v1a_34(inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + spatial_squeeze=True, + inputs_true_shape=None, + reuse=None, + include_root_block=True, + block_names=None, + block_kwargs=None, + scope='resnet_v1a_34'): + """ResNet-34 model A-variant of [1]. See resnet_v1() for arg and return description.""" + block_func = resnet_v1_basic_block + default_kwargs = OrderedDict([ + ('block1', dict(base_depth=64, num_units=3, stride=1)), + ('block2', dict(base_depth=128, num_units=4, stride=2)), + ('block3', dict(base_depth=256, num_units=6, stride=2)), + ('block4', dict(base_depth=512, num_units=3, stride=2))]) + block_names = block_names if block_names else default_kwargs.keys() + block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names) + blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg)) + for block_name, block_kwarg in zip(block_names, block_kwargs)] + return resnet_v1(inputs, blocks, num_classes, is_training, + global_pool=global_pool, output_stride=output_stride, + include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, + store_non_strided_activations=True, + inputs_true_shape=inputs_true_shape, + reuse=reuse, scope=scope) + + +def resnet_v1a_50(inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + spatial_squeeze=True, + inputs_true_shape=None, + reuse=None, + include_root_block=True, + block_names=None, + block_kwargs=None, + scope='resnet_v1a_50'): + """ResNet-50 model A-variant of [1]. 
See resnet_v1() for arg and return description."""
+  block_func = resnet_v1_bottleneck_block
+  default_kwargs = OrderedDict([
+      ('block1', dict(base_depth=64, num_units=3, stride=1, middle_stride=False)),
+      ('block2', dict(base_depth=128, num_units=4, stride=2, middle_stride=False)),
+      ('block3', dict(base_depth=256, num_units=6, stride=2, middle_stride=False)),
+      ('block4', dict(base_depth=512, num_units=3, stride=2, middle_stride=False))])
+  block_names = block_names if block_names else default_kwargs.keys()
+  block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names)
+  blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg))
+            for block_name, block_kwarg in zip(block_names, block_kwargs)]
+  return resnet_v1(inputs, blocks, num_classes, is_training,
+                   global_pool=global_pool, output_stride=output_stride,
+                   include_root_block=include_root_block, spatial_squeeze=spatial_squeeze,
+                   store_non_strided_activations=True,
+                   inputs_true_shape=inputs_true_shape,
+                   reuse=reuse, scope=scope)
+
+
+def resnet_v1a_101(inputs,
+                   num_classes=None,
+                   is_training=True,
+                   global_pool=True,
+                   output_stride=None,
+                   spatial_squeeze=True,
+                   inputs_true_shape=None,
+                   reuse=None,
+                   include_root_block=True,
+                   block_names=None,
+                   block_kwargs=None,
+                   scope='resnet_v1a_101'):
+  """ResNet-101 model A-variant of [1]. See resnet_v1() for arg and return description."""
+  block_func = resnet_v1_bottleneck_block
+  default_kwargs = OrderedDict([
+      ('block1', dict(base_depth=64, num_units=3, stride=1, middle_stride=False)),
+      ('block2', dict(base_depth=128, num_units=4, stride=2, middle_stride=False)),
+      ('block3', dict(base_depth=256, num_units=23, stride=2, middle_stride=False)),
+      ('block4', dict(base_depth=512, num_units=3, stride=2, middle_stride=False))])
+  block_names = block_names if block_names else default_kwargs.keys()
+  block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names)
+  blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg))
+            for block_name, block_kwarg in zip(block_names, block_kwargs)]
+  return resnet_v1(inputs, blocks, num_classes, is_training,
+                   global_pool=global_pool, output_stride=output_stride,
+                   include_root_block=include_root_block, spatial_squeeze=spatial_squeeze,
+                   store_non_strided_activations=True,
+                   inputs_true_shape=inputs_true_shape,
+                   reuse=reuse, scope=scope)
+
+
+def resnet_v1a_152(inputs,
+                   num_classes=None,
+                   is_training=True,
+                   global_pool=True,
+                   output_stride=None,
+                   spatial_squeeze=True,
+                   inputs_true_shape=None,
+                   reuse=None,
+                   include_root_block=True,
+                   block_names=None,
+                   block_kwargs=None,
+                   scope='resnet_v1a_152'):
+  """ResNet-152 model A-variant of [1]. 
See resnet_v1() for arg and return description."""
+  block_func = resnet_v1_bottleneck_block
+  default_kwargs = OrderedDict([
+      ('block1', dict(base_depth=64, num_units=3, stride=1, middle_stride=False)),
+      ('block2', dict(base_depth=128, num_units=8, stride=2, middle_stride=False)),
+      ('block3', dict(base_depth=256, num_units=36, stride=2, middle_stride=False)),
+      ('block4', dict(base_depth=512, num_units=3, stride=2, middle_stride=False))])
+  block_names = block_names if block_names else default_kwargs.keys()
+  block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names)
+  blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg))
+            for block_name, block_kwarg in zip(block_names, block_kwargs)]
+  return resnet_v1(inputs, blocks, num_classes, is_training,
+                   global_pool=global_pool, output_stride=output_stride,
+                   include_root_block=include_root_block, spatial_squeeze=spatial_squeeze,
+                   store_non_strided_activations=True,
+                   inputs_true_shape=inputs_true_shape,
+                   reuse=reuse, scope=scope)
+
+
+def resnet_v1b_50(inputs,
+                  num_classes=None,
+                  is_training=True,
+                  global_pool=True,
+                  output_stride=None,
+                  spatial_squeeze=True,
+                  inputs_true_shape=None,
+                  reuse=None,
+                  include_root_block=True,
+                  block_names=None,
+                  block_kwargs=None,
+                  scope='resnet_v1b_50'):
+  """ResNet-50 model B-variant of [1]. See resnet_v1() for arg and return description."""
+  block_func = resnet_v1_bottleneck_block
+  default_kwargs = OrderedDict([
+      ('block1', dict(base_depth=64, num_units=3, stride=1)),
+      ('block2', dict(base_depth=128, num_units=4, stride=2)),
+      ('block3', dict(base_depth=256, num_units=6, stride=2)),
+      ('block4', dict(base_depth=512, num_units=3, stride=2))])
+  block_names = block_names if block_names else default_kwargs.keys()
+  block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names)
+  blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg))
+            for block_name, block_kwarg in zip(block_names, block_kwargs)]
+  return resnet_v1(inputs, blocks, num_classes, is_training,
+                   global_pool=global_pool, output_stride=output_stride,
+                   include_root_block=include_root_block, spatial_squeeze=spatial_squeeze,
+                   store_non_strided_activations=True,
+                   inputs_true_shape=inputs_true_shape,
+                   reuse=reuse, scope=scope)
+
+
+def resnet_v1b_101(inputs,
+                   num_classes=None,
+                   is_training=True,
+                   global_pool=True,
+                   output_stride=None,
+                   spatial_squeeze=True,
+                   inputs_true_shape=None,
+                   reuse=None,
+                   include_root_block=True,
+                   block_names=None,
+                   block_kwargs=None,
+                   scope='resnet_v1b_101'):
+  """ResNet-101 model B-variant of [1]. 
See resnet_v1() for arg and return description."""
+  block_func = resnet_v1_bottleneck_block
+  default_kwargs = OrderedDict([
+      ('block1', dict(base_depth=64, num_units=3, stride=1)),
+      ('block2', dict(base_depth=128, num_units=4, stride=2)),
+      ('block3', dict(base_depth=256, num_units=23, stride=2)),
+      ('block4', dict(base_depth=512, num_units=3, stride=2))])
+  block_names = block_names if block_names else default_kwargs.keys()
+  block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names)
+  blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg))
+            for block_name, block_kwarg in zip(block_names, block_kwargs)]
+  return resnet_v1(inputs, blocks, num_classes, is_training,
+                   global_pool=global_pool, output_stride=output_stride,
+                   include_root_block=include_root_block, spatial_squeeze=spatial_squeeze,
+                   store_non_strided_activations=True,
+                   inputs_true_shape=inputs_true_shape,
+                   reuse=reuse, scope=scope)
+
+
+def resnet_v1b_152(inputs,
+                   num_classes=None,
+                   is_training=True,
+                   global_pool=True,
+                   output_stride=None,
+                   spatial_squeeze=True,
+                   inputs_true_shape=None,
+                   reuse=None,
+                   include_root_block=True,
+                   block_names=None,
+                   block_kwargs=None,
+                   scope='resnet_v1b_152'):
+  """ResNet-152 model B-variant of [1]. See resnet_v1() for arg and return description."""
+  block_func = resnet_v1_bottleneck_block
+  default_kwargs = OrderedDict([
+      ('block1', dict(base_depth=64, num_units=3, stride=1)),
+      ('block2', dict(base_depth=128, num_units=8, stride=2)),
+      ('block3', dict(base_depth=256, num_units=36, stride=2)),
+      ('block4', dict(base_depth=512, num_units=3, stride=2))])
+  block_names = block_names if block_names else default_kwargs.keys()
+  block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names)
+  blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg))
+            for block_name, block_kwarg in zip(block_names, block_kwargs)]
+  return resnet_v1(inputs, blocks, num_classes, is_training,
+                   global_pool=global_pool, output_stride=output_stride,
+                   include_root_block=include_root_block, spatial_squeeze=spatial_squeeze,
+                   store_non_strided_activations=True,
+                   inputs_true_shape=inputs_true_shape,
+                   reuse=reuse, scope=scope)
+
+
+def resnet_v1c_50(inputs,
+                  num_classes=None,
+                  is_training=True,
+                  global_pool=True,
+                  output_stride=None,
+                  spatial_squeeze=True,
+                  inputs_true_shape=None,
+                  reuse=None,
+                  include_root_block=True,
+                  block_names=None,
+                  block_kwargs=None,
+                  scope='resnet_v1c_50'):
+  """ResNet-50 model C-variant of [1]. 
See resnet_v1() for arg and return description.""" + block_func = resnet_v1_bottleneck_block + default_kwargs = OrderedDict([ + ('block1', dict(base_depth=64, num_units=3, stride=1)), + ('block2', dict(base_depth=128, num_units=4, stride=2)), + ('block3', dict(base_depth=256, num_units=6, stride=2)), + ('block4', dict(base_depth=512, num_units=3, stride=2))]) + block_names = block_names if block_names else default_kwargs.keys() + block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names) + blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg)) + for block_name, block_kwarg in zip(block_names, block_kwargs)] + return resnet_v1(inputs, blocks, num_classes, is_training, + global_pool=global_pool, output_stride=output_stride, + include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, + store_non_strided_activations=True, + deep_stem=True, inputs_true_shape=inputs_true_shape, + reuse=reuse, scope=scope) + + +def resnet_v1c_101(inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + spatial_squeeze=True, + inputs_true_shape=None, + reuse=None, + include_root_block=True, + block_names=None, + block_kwargs=None, + scope='resnet_v1c_101'): + """ResNet-101 model C-variant of [1]. See resnet_v1() for arg and return description.""" + block_func = resnet_v1_bottleneck_block + default_kwargs = OrderedDict([ + ('block1', dict(base_depth=64, num_units=3, stride=1)), + ('block2', dict(base_depth=128, num_units=4, stride=2)), + ('block3', dict(base_depth=256, num_units=23, stride=2)), + ('block4', dict(base_depth=512, num_units=3, stride=2))]) + block_names = block_names if block_names else default_kwargs.keys() + block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names) + blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg)) + for block_name, block_kwarg in zip(block_names, block_kwargs)] + return resnet_v1(inputs, blocks, num_classes, is_training, + global_pool=global_pool, output_stride=output_stride, + include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, + store_non_strided_activations=True, + deep_stem=True, inputs_true_shape=inputs_true_shape, + reuse=reuse, scope=scope) + + +def resnet_v1c_152(inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + spatial_squeeze=True, + inputs_true_shape=None, + reuse=None, + include_root_block=True, + block_names=None, + block_kwargs=None, + scope='resnet_v1c_152'): + """ResNet-152 model C-variant of [1]. 
See resnet_v1() for arg and return description."""
+  block_func = resnet_v1_bottleneck_block
+  default_kwargs = OrderedDict([
+      ('block1', dict(base_depth=64, num_units=3, stride=1)),
+      ('block2', dict(base_depth=128, num_units=8, stride=2)),
+      ('block3', dict(base_depth=256, num_units=36, stride=2)),
+      ('block4', dict(base_depth=512, num_units=3, stride=2))])
+  block_names = block_names if block_names else default_kwargs.keys()
+  block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names)
+  blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg))
+            for block_name, block_kwarg in zip(block_names, block_kwargs)]
+  return resnet_v1(inputs, blocks, num_classes, is_training,
+                   global_pool=global_pool, output_stride=output_stride,
+                   include_root_block=include_root_block, spatial_squeeze=spatial_squeeze,
+                   store_non_strided_activations=True,
+                   deep_stem=True, inputs_true_shape=inputs_true_shape,
+                   reuse=reuse, scope=scope)
+
+
+def resnet_v1d_50(inputs,
+                  num_classes=None,
+                  is_training=True,
+                  global_pool=True,
+                  output_stride=None,
+                  spatial_squeeze=True,
+                  inputs_true_shape=None,
+                  reuse=None,
+                  include_root_block=True,
+                  block_names=None,
+                  block_kwargs=None,
+                  scope='resnet_v1d_50'):
+  """ResNet-50 model D-variant of [1]. See resnet_v1() for arg and return description."""
+  block_func = resnet_v1_bottleneck_block
+  default_kwargs = OrderedDict([
+      ('block1', dict(base_depth=64, num_units=3, stride=1, avg_down=True)),
+      ('block2', dict(base_depth=128, num_units=4, stride=2, avg_down=True)),
+      ('block3', dict(base_depth=256, num_units=6, stride=2, avg_down=True)),
+      ('block4', dict(base_depth=512, num_units=3, stride=2, avg_down=True))])
+  block_names = block_names if block_names else default_kwargs.keys()
+  block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names)
+  blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg))
+            for block_name, block_kwarg in zip(block_names, block_kwargs)]
+  return resnet_v1(inputs, blocks, num_classes, is_training,
+                   global_pool=global_pool, output_stride=output_stride,
+                   include_root_block=include_root_block, spatial_squeeze=spatial_squeeze,
+                   store_non_strided_activations=True,
+                   deep_stem=True, inputs_true_shape=inputs_true_shape,
+                   reuse=reuse, scope=scope)
+
+
+def resnet_v1d_101(inputs,
+                   num_classes=None,
+                   is_training=True,
+                   global_pool=True,
+                   output_stride=None,
+                   spatial_squeeze=True,
+                   inputs_true_shape=None,
+                   reuse=None,
+                   include_root_block=True,
+                   block_names=None,
+                   block_kwargs=None,
+                   scope='resnet_v1d_101'):
+  """ResNet-101 model D-variant of [1]. 
See resnet_v1() for arg and return description.""" + block_func = resnet_v1_bottleneck_block + default_kwargs = OrderedDict([ + ('block1', dict(base_depth=64, num_units=3, stride=1, avg_down=True)), + ('block2', dict(base_depth=128, num_units=4, stride=2, avg_down=True)), + ('block3', dict(base_depth=256, num_units=23, stride=2, avg_down=True)), + ('block4', dict(base_depth=512, num_units=3, stride=2, avg_down=True))]) + block_names = block_names if block_names else default_kwargs.keys() + block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names) + blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg)) + for block_name, block_kwarg in zip(block_names, block_kwargs)] + return resnet_v1(inputs, blocks, num_classes, is_training, + global_pool=global_pool, output_stride=output_stride, + include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, + store_non_strided_activations=True, + deep_stem=True, inputs_true_shape=inputs_true_shape, + reuse=reuse, scope=scope) + + +def resnet_v1d_152(inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + spatial_squeeze=True, + inputs_true_shape=None, + reuse=None, + include_root_block=True, + block_names=None, + block_kwargs=None, + scope='resnet_v1d_152'): + """ResNet-152 model D-variant of [1]. See resnet_v1() for arg and return description.""" + block_func = resnet_v1_bottleneck_block + default_kwargs = OrderedDict([ + ('block1', dict(base_depth=64, num_units=3, stride=1, avg_down=True)), + ('block2', dict(base_depth=128, num_units=8, stride=2, avg_down=True)), + ('block3', dict(base_depth=256, num_units=36, stride=2, avg_down=True)), + ('block4', dict(base_depth=512, num_units=3, stride=2, avg_down=True))]) + block_names = block_names if block_names else default_kwargs.keys() + block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names) + blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg)) + for block_name, block_kwarg in zip(block_names, block_kwargs)] + return resnet_v1(inputs, blocks, num_classes, is_training, + global_pool=global_pool, output_stride=output_stride, + include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, + store_non_strided_activations=True, + deep_stem=True, inputs_true_shape=inputs_true_shape, + reuse=reuse, scope=scope) + + +# def resnet_v1_200(inputs, +# num_classes=None, +# is_training=True, +# global_pool=True, +# output_stride=None, +# spatial_squeeze=True, +# inputs_true_shape=None, +# reuse=None, +# scope='resnet_v1_200'): +# """ResNet-200 model of [2]. 
See resnet_v1() for arg and return description.""" +# blocks = [ +# resnet_v1_block('block1', base_depth=64, num_units=3, stride=2), +# resnet_v1_block('block2', base_depth=128, num_units=24, stride=2), +# resnet_v1_block('block3', base_depth=256, num_units=36, stride=2), +# resnet_v1_block('block4', base_depth=512, num_units=3, stride=1) +# ] +# return resnet_v1(inputs, blocks, num_classes, is_training, +# global_pool=global_pool, output_stride=output_stride, +# include_root_block=True, spatial_squeeze=spatial_squeeze, +# inputs_true_shape=inputs_true_shape, +# reuse=reuse, scope=scope) + + +def se_resnet_v1_50(inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + spatial_squeeze=True, + inputs_true_shape=None, + reuse=None, + include_root_block=True, + block_names=None, + block_kwargs=None, + scope='se_resnet_v1_50'): + """SE ResNet-50 model of [1].""" + block_func = resnet_v1_bottleneck_block + default_kwargs = OrderedDict([ + ('block1', dict(base_depth=64, num_units=3, stride=1, middle_stride=False, se_rate=16)), + ('block2', dict(base_depth=128, num_units=4, stride=2, middle_stride=False, se_rate=16)), + ('block3', dict(base_depth=256, num_units=6, stride=2, middle_stride=False, se_rate=16)), + ('block4', dict(base_depth=512, num_units=3, stride=2, middle_stride=False, se_rate=16))]) + block_names = block_names if block_names else default_kwargs.keys() + block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names) + blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg)) + for block_name, block_kwarg in zip(block_names, block_kwargs)] + return resnet_v1(inputs, blocks, num_classes, is_training, + global_pool=global_pool, output_stride=output_stride, + include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, + store_non_strided_activations=True, + inputs_true_shape=inputs_true_shape, + reuse=reuse, scope=scope) + + +def se_resnet_v1_101(inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + spatial_squeeze=True, + inputs_true_shape=None, + reuse=None, + include_root_block=True, + block_names=None, + block_kwargs=None, + scope='se_resnet_v1_101'): + """SE ResNet-101 model of [1].""" + block_func = resnet_v1_bottleneck_block + default_kwargs = OrderedDict([ + ('block1', dict(base_depth=64, num_units=3, stride=1, middle_stride=False, se_rate=16)), + ('block2', dict(base_depth=128, num_units=4, stride=2, middle_stride=False, se_rate=16)), + ('block3', dict(base_depth=256, num_units=23, stride=2, middle_stride=False, se_rate=16)), + ('block4', dict(base_depth=512, num_units=3, stride=2, middle_stride=False, se_rate=16))]) + block_names = block_names if block_names else default_kwargs.keys() + block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names) + blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg)) + for block_name, block_kwarg in zip(block_names, block_kwargs)] + return resnet_v1(inputs, blocks, num_classes, is_training, + global_pool=global_pool, output_stride=output_stride, + include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, + store_non_strided_activations=True, + inputs_true_shape=inputs_true_shape, + reuse=reuse, scope=scope) + + +def se_resnet_v1_152(inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + spatial_squeeze=True, + inputs_true_shape=None, + reuse=None, + include_root_block=True, + block_names=None, + block_kwargs=None, + 
scope='se_resnet_v1_152'): + """SE ResNet-152 model of [1].""" + block_func = resnet_v1_bottleneck_block + default_kwargs = OrderedDict([ + ('block1', dict(base_depth=64, num_units=3, stride=1, middle_stride=False, se_rate=16)), + ('block2', dict(base_depth=128, num_units=8, stride=2, middle_stride=False, se_rate=16)), + ('block3', dict(base_depth=256, num_units=36, stride=2, middle_stride=False, se_rate=16)), + ('block4', dict(base_depth=512, num_units=3, stride=2, middle_stride=False, se_rate=16))]) + block_names = block_names if block_names else default_kwargs.keys() + block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names) + blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg)) + for block_name, block_kwarg in zip(block_names, block_kwargs)] + return resnet_v1(inputs, blocks, num_classes, is_training, + global_pool=global_pool, output_stride=output_stride, + include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, + store_non_strided_activations=True, + inputs_true_shape=inputs_true_shape, + reuse=reuse, scope=scope) + + +def get_scopes_of_levels(scope, with_logits=True): + """ + Args: + scope: scope name for resnet_v1 + with_logits: with classification layer or not. + return a list of variable scope list order by levels. + """ + scopes_of_levels = [[scope + "/block4"], + [scope + "/block3"], + [scope + "/block2"], + [scope + "/block1"], + [scope + "/conv1", scope + '/conv2', scope + '/conv3']] + if with_logits: + return [[scope + "/logits"]] + scopes_of_levels + else: + return scopes_of_levels diff --git a/easy_rec/python/vision_backbones/nets/resnet_v2.py b/easy_rec/python/vision_backbones/nets/resnet_v2.py new file mode 100644 index 000000000..65403f8bc --- /dev/null +++ b/easy_rec/python/vision_backbones/nets/resnet_v2.py @@ -0,0 +1,374 @@ +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Contains definitions for the preactivation form of Residual Networks. + +Residual networks (ResNets) were originally proposed in: +[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun + Deep Residual Learning for Image Recognition. arXiv:1512.03385 + +The full preactivation 'v2' ResNet variant implemented in this module was +introduced by: +[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun + Identity Mappings in Deep Residual Networks. arXiv: 1603.05027 + +The key difference of the full preactivation 'v2' variant compared to the +'v1' variant in [1] is the use of batch normalization before every weight layer. 
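+
+In rough pseudocode (a sketch that omits the shortcut projection details),
+the two orderings compare as:
+
+   # v1: convolutions followed by BN; ReLU is applied after the addition
+   output = relu(shortcut + batch_norm(conv(net)))
+
+   # v2: BN and ReLU precede each convolution; the addition is left plain
+   output = shortcut + conv(relu(batch_norm(net)))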
+
+Typical use:
+
+   from easy_rec.python.vision_backbones.nets import resnet_v2
+
+ResNet-101 for image classification into 1000 classes:
+
+   # inputs has shape [batch, 224, 224, 3]
+   with slim.arg_scope(resnet_v2.resnet_arg_scope()):
+      net, end_points = resnet_v2.resnet_v2_101(inputs, 1000, is_training=False)
+
+ResNet-101 for semantic segmentation into 21 classes:
+
+   # inputs has shape [batch, 513, 513, 3]
+   with slim.arg_scope(resnet_v2.resnet_arg_scope()):
+      net, end_points = resnet_v2.resnet_v2_101(inputs,
+                                                21,
+                                                is_training=False,
+                                                global_pool=False,
+                                                output_stride=16)
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from collections import OrderedDict
+
+import tensorflow as tf
+
+from easy_rec.python.vision_backbones.nets import resnet_utils
+
+slim = tf.contrib.slim
+resnet_arg_scope = resnet_utils.resnet_arg_scope
+
+
+@slim.add_arg_scope
+def bottleneck(inputs, depth, depth_bottleneck, stride, rate=1,
+               inputs_mask=None,
+               outputs_collections=None, scope=None):
+  """Bottleneck residual unit variant with BN before convolutions.
+
+  This is the full preactivation residual unit variant proposed in [2]. See
+  Fig. 1(b) of [2] for its definition. Note that we use here the bottleneck
+  variant which has an extra bottleneck layer.
+
+  When putting together two consecutive ResNet blocks that use this unit, one
+  should use stride = 2 in the last unit of the first block.
+
+  Args:
+    inputs: A tensor of size [batch, height, width, channels].
+    depth: The depth of the ResNet unit output.
+    depth_bottleneck: The depth of the bottleneck layers.
+    stride: The ResNet unit's stride. Determines the amount of downsampling of
+      the unit's output compared to its input.
+    rate: An integer, rate for atrous convolution.
+    inputs_mask: A tensor of size [batch, height, width] marking the valid
+      input region. Not implemented yet.
+    outputs_collections: Collection to add the ResNet unit output.
+    scope: Optional variable_scope.
+
+  Returns:
+    The ResNet unit's output.
+  """
+  with tf.variable_scope(scope, 'bottleneck_v2', [inputs]) as sc:
+    depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
+    preact = slim.batch_norm(inputs, activation_fn=tf.nn.relu, scope='preact')
+    if depth == depth_in:
+      shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
+    else:
+      shortcut = slim.conv2d(preact, depth, [1, 1], stride=stride,
+                             normalizer_fn=None, activation_fn=None,
+                             scope='shortcut')
+
+    residual = slim.conv2d(preact, depth_bottleneck, [1, 1], stride=1,
+                           scope='conv1')
+    residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
+                                        rate=rate, scope='conv2')
+    residual = slim.conv2d(residual, depth, [1, 1], stride=1,
+                           normalizer_fn=None, activation_fn=None,
+                           scope='conv3')
+
+    output = shortcut + residual
+
+    return slim.utils.collect_named_outputs(outputs_collections,
+                                            sc.name,
+                                            output)
+
+
+def resnet_v2(inputs,
+              blocks,
+              num_classes=None,
+              is_training=True,
+              global_pool=True,
+              output_stride=None,
+              include_root_block=True,
+              spatial_squeeze=True,
+              reuse=None,
+              scope=None):
+  """Generator for v2 (preactivation) ResNet models.
+
+  This function generates a family of ResNet v2 models. See the resnet_v2_*()
+  methods for specific model instantiations, obtained by selecting different
+  block instantiations that produce ResNets of various depths.
+
+  Training for image classification on Imagenet is usually done with [224, 224]
+  inputs, resulting in [7, 7] feature maps at the output of the last ResNet
+  block for the ResNets defined in [1] that have nominal stride equal to 32.
+  However, for dense prediction tasks we advise that one uses inputs with
+  spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
+  this case the feature maps at the ResNet output will have spatial shape
+  [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
+  and corners exactly aligned with the input image corners, which greatly
+  facilitates alignment of the features to the image. Using as input [225, 225]
+  images results in [8, 8] feature maps at the output of the last ResNet block.
+
+  For dense prediction tasks, the ResNet needs to run in fully-convolutional
+  (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
+  have nominal stride equal to 32 and a good choice in FCN mode is to use
+  output_stride=16 in order to increase the density of the computed features at
+  small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.
+
+  Args:
+    inputs: A tensor of size [batch, height_in, width_in, channels].
+    blocks: A list of length equal to the number of ResNet blocks. Each element
+      is a resnet_utils.Block object describing the units in the block.
+    num_classes: Number of predicted classes for classification tasks.
+      If 0 or None, we return the features before the logit layer.
+    is_training: whether batch_norm layers are in training mode.
+    global_pool: If True, we perform global average pooling before computing the
+      logits. Set to True for image classification, False for dense prediction.
+    output_stride: If None, then the output will be computed at the nominal
+      network stride. If output_stride is not None, it specifies the requested
+      ratio of input to output spatial resolution.
+    include_root_block: If True, include the initial convolution followed by
+      max-pooling, if False excludes it. If excluded, `inputs` should be the
+      results of an activation-less convolution.
+    spatial_squeeze: If True, logits is of shape [B, C]; if False, logits is
+      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
+      To use this parameter, the input images must be smaller than 300x300
+      pixels, in which case the output logit layer does not contain spatial
+      information and can be removed.
+    reuse: whether or not the network and its variables should be reused. To be
+      able to reuse 'scope' must be given.
+    scope: Optional variable_scope.
+
+  Returns:
+    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
+      If global_pool is False, then height_out and width_out are reduced by a
+      factor of output_stride compared to the respective height_in and width_in,
+      else both height_out and width_out equal one. If num_classes is 0 or None,
+      then net is the output of the last ResNet block, potentially after global
+      average pooling. If num_classes is a non-zero integer, net contains the
+      pre-softmax activations.
+    end_points: A dictionary from components of the network to the corresponding
+      activation.
+
+  Raises:
+    ValueError: If the target output_stride is not valid.
+ """ + with tf.variable_scope(scope, 'resnet_v2', [inputs], reuse=reuse) as sc: + end_points_collection = sc.original_name_scope + '_end_points' + with slim.arg_scope([slim.conv2d, bottleneck, + resnet_utils.stack_blocks_dense], + outputs_collections=end_points_collection): + with slim.arg_scope([slim.batch_norm], is_training=is_training): + net = inputs + if include_root_block: + if output_stride is not None: + if output_stride % 4 != 0: + raise ValueError('The output_stride needs to be a multiple of 4.') + output_stride /= 4 + # We do not include batch normalization or activation functions in + # conv1 because the first ResNet unit will perform these. Cf. + # Appendix of [2]. + with slim.arg_scope([slim.conv2d], + activation_fn=None, normalizer_fn=None): + net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1') + net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1') + net = resnet_utils.stack_blocks_dense(net, blocks, output_stride) + # This is needed because the pre-activation variant does not have batch + # normalization or activation functions in the residual unit output. See + # Appendix of [2]. + net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope='postnorm') + # Convert end_points_collection into a dictionary of end_points. + end_points = slim.utils.convert_collection_to_dict( + end_points_collection) + + if global_pool: + # Global average pooling. + net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True) + end_points['global_pool'] = net + if num_classes: + net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, + normalizer_fn=None, scope='logits') + end_points[sc.name + '/logits'] = net + if spatial_squeeze: + net = tf.squeeze(net, [1, 2], name='SpatialSqueeze') + end_points[sc.name + '/spatial_squeeze'] = net + end_points['predictions'] = slim.softmax(net, scope='predictions') + return net, end_points +resnet_v2.default_image_size = 224 + + +def resnet_v2_block(scope, base_depth, num_units, stride): + """Helper function for creating a resnet_v2 bottleneck block. + + Args: + scope: The scope of the block. + base_depth: The depth of the bottleneck layer for each unit. + num_units: The number of units in the block. + stride: The stride of the block, implemented as a stride in the last unit. + All other units have stride=1. + + Returns: + A resnet_v2 bottleneck block. + """ + return resnet_utils.Block(scope, bottleneck, [{ + 'depth': base_depth * 4, + 'depth_bottleneck': base_depth, + 'stride': 1 + }] * (num_units - 1) + [{ + 'depth': base_depth * 4, + 'depth_bottleneck': base_depth, + 'stride': stride + }]) +resnet_v2.default_image_size = 224 + + +def resnet_v2_50(inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + spatial_squeeze=True, + inputs_true_shape=None, + reuse=None, + include_root_block=True, + block_names=None, + block_kwargs=None, + scope='resnet_v2_50'): + """ResNet-50 model of [1]. 
See resnet_v2() for arg and return description.""" + block_func = resnet_v2_block + default_kwargs = OrderedDict([ + ('block1', dict(base_depth=64, num_units=3, stride=2)), + ('block2', dict(base_depth=128, num_units=4, stride=2)), + ('block3', dict(base_depth=256, num_units=6, stride=2)), + ('block4', dict(base_depth=512, num_units=3, stride=1))]) + block_names = block_names if block_names else default_kwargs.keys() + block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names) + blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg)) + for block_name, block_kwarg in zip(block_names, block_kwargs)] + return resnet_v2(inputs, blocks, num_classes, is_training=is_training, + global_pool=global_pool, output_stride=output_stride, + include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, + reuse=reuse, scope=scope) +resnet_v2_50.default_image_size = resnet_v2.default_image_size + + +def resnet_v2_101(inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + spatial_squeeze=True, + inputs_true_shape=None, + reuse=None, + include_root_block=True, + block_names=None, + block_kwargs=None, + scope='resnet_v2_101'): + """ResNet-101 model of [1]. See resnet_v2() for arg and return description.""" + block_func = resnet_v2_block + default_kwargs = OrderedDict([ + ('block1', dict(base_depth=64, num_units=3, stride=2)), + ('block2', dict(base_depth=128, num_units=4, stride=2)), + ('block3', dict(base_depth=256, num_units=23, stride=2)), + ('block4', dict(base_depth=512, num_units=3, stride=1))]) + block_names = block_names if block_names else default_kwargs.keys() + block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names) + blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg)) + for block_name, block_kwarg in zip(block_names, block_kwargs)] + return resnet_v2(inputs, blocks, num_classes, is_training=is_training, + global_pool=global_pool, output_stride=output_stride, + include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, + reuse=reuse, scope=scope) +resnet_v2_101.default_image_size = resnet_v2.default_image_size + + +def resnet_v2_152(inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + spatial_squeeze=True, + inputs_true_shape=None, + reuse=None, + include_root_block=True, + block_names=None, + block_kwargs=None, + scope='resnet_v2_152'): + """ResNet-152 model of [1]. 
See resnet_v2() for arg and return description.""" + block_func = resnet_v2_block + default_kwargs = OrderedDict([ + ('block1', dict(base_depth=64, num_units=3, stride=2)), + ('block2', dict(base_depth=128, num_units=8, stride=2)), + ('block3', dict(base_depth=256, num_units=36, stride=2)), + ('block4', dict(base_depth=512, num_units=3, stride=1))]) + block_names = block_names if block_names else default_kwargs.keys() + block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names) + blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg)) + for block_name, block_kwarg in zip(block_names, block_kwargs)] + return resnet_v2(inputs, blocks, num_classes, is_training=is_training, + global_pool=global_pool, output_stride=output_stride, + include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, + reuse=reuse, scope=scope) +resnet_v2_152.default_image_size = resnet_v2.default_image_size + + +def resnet_v2_200(inputs, + num_classes=None, + is_training=True, + global_pool=True, + output_stride=None, + spatial_squeeze=True, + inputs_true_shape=None, + reuse=None, + include_root_block=True, + block_names=None, + block_kwargs=None, + scope='resnet_v2_200'): + """ResNet-200 model of [2]. See resnet_v2() for arg and return description.""" + block_func = resnet_v2_block + default_kwargs = OrderedDict([ + ('block1', dict(base_depth=64, num_units=3, stride=2)), + ('block2', dict(base_depth=128, num_units=24, stride=2)), + ('block3', dict(base_depth=256, num_units=36, stride=2)), + ('block4', dict(base_depth=512, num_units=3, stride=1))]) + block_names = block_names if block_names else default_kwargs.keys() + block_kwargs = block_kwargs if block_kwargs else [{}] * len(block_names) + blocks = [block_func(block_name, **dict(default_kwargs[block_name], **block_kwarg)) + for block_name, block_kwarg in zip(block_names, block_kwargs)] + return resnet_v2(inputs, blocks, num_classes, is_training=is_training, + global_pool=global_pool, output_stride=output_stride, + include_root_block=include_root_block, spatial_squeeze=spatial_squeeze, + reuse=reuse, scope=scope) +resnet_v2_200.default_image_size = resnet_v2.default_image_size From 2f90b693f7206d8806a134cc7b352bcc1c72962c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=99=93=E6=82=A6?= Date: Thu, 10 Mar 2022 17:57:11 +0800 Subject: [PATCH 8/9] update test data --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a223b9c2c..87f7111e2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -23,7 +23,7 @@ jobs: source activate /home/admin/tf12_py2/ if [ ! 
-e "/tmp/easyrec_data_20220113.tar.gz" ] then - wget https://easyrec.oss-cn-beijing.aliyuncs.com/data/easyrec_data_20220113.tar.gz -O /tmp/easyrec_data_20220113.tar.gz + wget https://easyrec.oss-cn-beijing.aliyuncs.com/data/easyrec_data_20220310.tar.gz -O /tmp/easyrec_data_20220310.tar.gz fi tar -zvxf /tmp/easyrec_data_20220113.tar.gz source scripts/ci_test.sh From b45f068fbda73c5520246fd6a366922ac5136b34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=99=93=E6=82=A6?= Date: Fri, 11 Mar 2022 15:37:03 +0800 Subject: [PATCH 9/9] fix test data --- .github/workflows/ci.yml | 4 ++-- easy_rec/python/input/input.py | 42 +++++++++++++++++++--------------- 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 87f7111e2..9db79aec4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,11 +21,11 @@ jobs: TEST_DEVICES: "" run: | source activate /home/admin/tf12_py2/ - if [ ! -e "/tmp/easyrec_data_20220113.tar.gz" ] + if [ ! -e "/tmp/easyrec_data_20220310.tar.gz" ] then wget https://easyrec.oss-cn-beijing.aliyuncs.com/data/easyrec_data_20220310.tar.gz -O /tmp/easyrec_data_20220310.tar.gz fi - tar -zvxf /tmp/easyrec_data_20220113.tar.gz + tar -zvxf /tmp/easyrec_data_20220310.tar.gz source scripts/ci_test.sh - name: LabelAndComment env: diff --git a/easy_rec/python/input/input.py b/easy_rec/python/input/input.py index c3708c7cb..0c61902cc 100644 --- a/easy_rec/python/input/input.py +++ b/easy_rec/python/input/input.py @@ -4,10 +4,11 @@ from abc import abstractmethod from collections import OrderedDict +import cv2 +import numpy as np import six import tensorflow as tf -import numpy as np -import cv2 + from easy_rec.python.core import sampler as sampler_lib from easy_rec.python.protos.dataset_pb2 import DatasetConfig from easy_rec.python.utils import config_util @@ -164,9 +165,10 @@ def create_multi_placeholders(self, export_config): ] img_fea_info = None if export_config.img_input_name and export_config.img_shape: - img_fea_info = { - "input_name": export_config.img_input_name, - "input_shape": export_config.img_shape} + img_fea_info = { + 'input_name': export_config.img_input_name, + 'input_shape': export_config.img_shape + } if self._data_config.HasField('sample_weight'): effective_fids = effective_fids[:-1] @@ -187,7 +189,7 @@ def create_multi_placeholders(self, export_config): height = img_fea_info['input_shape'].height channel = img_fea_info['input_shape'].channel finput = tf.placeholder( - tf.float32, [None, width, height, channel], name=input_name) + tf.float32, [None, width, height, channel], name=input_name) else: ftype = self._input_field_types[fid] tf_type = self.get_tf_type(ftype) @@ -293,7 +295,8 @@ def _preprocess(self, field_dict): parsed_dict[k] = v self._appended_fields.append(k) - print("[input] all feature names: {}".format([fc.feature_name for fc in self._feature_configs])) + print('[input] all feature names: {}'.format( + [fc.feature_name for fc in self._feature_configs])) for fc in self._feature_configs: feature_name = fc.feature_name feature_type = fc.feature_type @@ -557,13 +560,15 @@ def _preprocess(self, field_dict): parsed_dict[input_0] = tf.string_to_number( parsed_dict[input_0], tf.int32, name='%s_str_2_int' % input_0) elif feature_type == fc.ImgFeature: + def _load_img(img_paths): img_feas = [] for img_path in img_paths: if isinstance(img_path, bytes): - img_path = img_path.decode("utf-8") + img_path = img_path.decode('utf-8') if tf.gfile.Exists(img_path): - img_fea = 
np.asarray(bytearray(tf.gfile.FastGFile(img_path, 'rb').read()))
+            img_fea = np.asarray(
+                bytearray(tf.gfile.FastGFile(img_path, 'rb').read()))
             img_fea = cv2.imdecode(img_fea, cv2.IMREAD_COLOR)
           else:
             img_fea = np.zeros(shape=(224, 224, 3))
@@ -583,15 +588,17 @@ def _repeat_sample(sample_nums):
         sample_num_feas = []
         idx = 0
         for sample_num in sample_nums:
-          if isinstance(sample_num, bytes):
-            sample_num = sample_num.decode("utf-8")
-          if isinstance(sample_num, str):
-            sample_num = int(sample_num)
-          sample_num_feas.extend([idx]*sample_num)
-          idx+=1
+          # tf.py_func passes string inputs as bytes; decode before casting.
+          if isinstance(sample_num, bytes):
+            sample_num = sample_num.decode('utf-8')
+          sample_num = int(sample_num)
+          sample_num_feas.extend([idx] * sample_num)
+          idx += 1
         sample_num_feas = np.array(sample_num_feas)
         return sample_num_feas
-      sample_num_fea = tf.py_func(_repeat_sample, [field_dict[input_0]], Tout=tf.int64)
+
+      sample_num_fea = tf.py_func(
+          _repeat_sample, [field_dict[input_0]], Tout=tf.int64)
       parsed_dict[input_0] = sample_num_fea
     else:
       for input_name in fc.input_names:
@@ -725,7 +732,8 @@ def _input_fn(mode=None, params=None, config=None):
       return tf.estimator.export.ServingInputReceiver(features, inputs)
     else:
       inputs, features = self.create_placeholders(export_config)
-      print("built feature placeholders. features: {}".format(features.keys()))
+      print('built feature placeholders. features: {}'.format(
+          features.keys()))
       return tf.estimator.export.ServingInputReceiver(features, inputs)
 
-  return _input_fn
\ No newline at end of file
+  return _input_fn
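
Usage sketch (illustrative only, not part of the patch series): the vendored
resnet_v2 backbone added above can be driven on its own to produce an image
embedding. The module path, the resnet_arg_scope helper and the resnet_v2_50
signature are taken from the new
easy_rec/python/vision_backbones/nets/resnet_v2.py; the placeholder shape and
variable names are assumptions for illustration.

    import tensorflow as tf

    from easy_rec.python.vision_backbones.nets import resnet_v2

    slim = tf.contrib.slim

    # A batch of decoded images, e.g. what the ImgFeature branch in
    # easy_rec/python/input/input.py feeds the backbone (224x224x3 assumed).
    images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='image')

    with slim.arg_scope(resnet_v2.resnet_arg_scope()):
      # num_classes=None skips the logits layer; with global_pool=True the
      # result is the pooled backbone feature of shape [batch, 1, 1, 2048].
      net, end_points = resnet_v2.resnet_v2_50(
          images, num_classes=None, is_training=False, global_pool=True)

    # Squeeze the spatial dims to obtain a [batch, 2048] image embedding.
    img_emb = tf.squeeze(net, [1, 2], name='img_embedding')

Leaving num_classes=None rather than attaching a classifier mirrors how a
multimodal rank model would typically consume the backbone: the pooled feature
acts as just another feature group rather than a prediction.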