From adfdaa32988b158004f252d7de4cb199c7b27503 Mon Sep 17 00:00:00 2001 From: Nicolas Legrand Date: Sat, 20 Apr 2024 16:37:07 +0200 Subject: [PATCH] add a navigation panel to the documentation (#178) * remove optional .md files * various updates on tutorials * reorganize the documentation layout * add descriptions --- .github/workflows/docs.yml | 2 +- docs/source/conf.py | 20 +- docs/source/images/multilevel-hgf.png | Bin 0 -> 61868 bytes docs/source/learn.md | 69 +- .../notebooks/0.1-Creating_networks.ipynb | 5 +- .../source/notebooks/0.1-Creating_networks.md | 524 ----------- docs/source/notebooks/0.2-Theory.ipynb | 5 +- docs/source/notebooks/0.2-Theory.md | 389 -------- docs/source/notebooks/1.1-Binary_HGF.ipynb | 5 +- docs/source/notebooks/1.1-Binary_HGF.md | 391 --------- .../notebooks/1.2-Categorical_HGF.ipynb | 5 +- docs/source/notebooks/1.2-Categorical_HGF.md | 343 -------- .../source/notebooks/1.3-Continuous_HGF.ipynb | 5 +- docs/source/notebooks/1.3-Continuous_HGF.md | 392 --------- .../2-Using_custom_response_functions.ipynb | 5 +- .../2-Using_custom_response_functions.md | 331 ------- docs/source/notebooks/3-Multilevel_HGF.ipynb | 179 ++-- docs/source/notebooks/3-Multilevel_HGF.md | 242 ----- .../notebooks/4-Parameter_recovery.ipynb | 5 +- docs/source/notebooks/4-Parameter_recovery.md | 253 ------ .../Example_1_Heart_rate_variability.ipynb | 3 - .../Example_1_Heart_rate_variability.md | 159 ---- ...ple_2_Input_node_volatility_coupling.ipynb | 5 +- ...xample_2_Input_node_volatility_coupling.md | 218 ----- .../Example_3_Multi_armed_bandit.ipynb | 5 +- .../notebooks/Example_3_Multi_armed_bandit.md | 621 ------------- .../notebooks/Exercise_1_Using_the_HGF.ipynb | 5 +- .../notebooks/Exercise_1_Using_the_HGF.md | 830 ------------------ requirements-docs.txt | 3 +- src/pyhgf/distribution.py | 2 +- 30 files changed, 166 insertions(+), 4855 deletions(-) create mode 100644 docs/source/images/multilevel-hgf.png delete mode 100644 
docs/source/notebooks/0.1-Creating_networks.md delete mode 100644 docs/source/notebooks/0.2-Theory.md delete mode 100644 docs/source/notebooks/1.1-Binary_HGF.md delete mode 100644 docs/source/notebooks/1.2-Categorical_HGF.md delete mode 100644 docs/source/notebooks/1.3-Continuous_HGF.md delete mode 100644 docs/source/notebooks/2-Using_custom_response_functions.md delete mode 100644 docs/source/notebooks/3-Multilevel_HGF.md delete mode 100644 docs/source/notebooks/4-Parameter_recovery.md delete mode 100644 docs/source/notebooks/Example_1_Heart_rate_variability.md delete mode 100644 docs/source/notebooks/Example_2_Input_node_volatility_coupling.md delete mode 100644 docs/source/notebooks/Example_3_Multi_armed_bandit.md delete mode 100644 docs/source/notebooks/Exercise_1_Using_the_HGF.md diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 0f1669f4d..4048b51c4 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -26,7 +26,7 @@ jobs: run: | sudo apt-get install graphviz pip install -r requirements-docs.txt - sphinx-build -j 2 -T -b html docs/source docs/build/html + sphinx-build -j 4 -T -b html docs/source docs/build/html - name: Deploy 🚀 uses: JamesIves/github-pages-deploy-action@v4 diff --git a/docs/source/conf.py b/docs/source/conf.py index 7997d0730..9de35d105 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -51,6 +51,12 @@ panels_add_bootstrap_css = False +intersphinx_mapping = { + "python": ("https://docs.python.org/3", None), + "numpy": ("https://numpy.org/doc/stable", None), + "jax": ("https://jax.readthedocs.io/en/latest", None), +} + # Generate the API documentation when building autosummary_generate = True numpydoc_show_class_members = False @@ -65,16 +71,16 @@ plot_html_show_formats = False plot_html_show_source_link = False -source_suffix = ['.rst', '.md'] +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'myst-nb', + '.ipynb': 'myst-nb', + '.myst': 'myst-nb', +} # The master toctree 
document. master_doc = "index" -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This pattern also affects html_static_path and html_extra_path. -exclude_patterns = ["notebooks/*.ipynb"] - # -- Options for HTML output ------------------------------------------------- html_logo = "images/logo_small.svg" @@ -105,6 +111,7 @@ "logo": { "text": "pyhgf", }, + "show_nav_level": 2 } myst_enable_extensions = ["dollarmath", "colon_fence"] @@ -112,6 +119,5 @@ html_sidebars = { "api": [], "cite": [], - "learn": [], "references": [], } diff --git a/docs/source/images/multilevel-hgf.png b/docs/source/images/multilevel-hgf.png new file mode 100644 index 0000000000000000000000000000000000000000..31a891d2635e800b9c8ebc5929e40ffcc4630c3c GIT binary patch literal 61868 zcmc$_hg*}`7d0An)SnsxkX{r81JZjRM0)SNs3V97XlT-V??M1+K@pMO zYk;WqE?ru3_lxu0=lTADdwoU+V{+be_St*wvQ~nW6lAC<=qOMq6cy&t0~HkNcs&Yr zl4I=dM;nxN#3ob9dcoUJXMTy-^Zbh5Cs<>eOS=Ht9- z?(A&uB+SEO^Z&kq+s@I9hl%-y3oLTV{*ks53Po*%{63N`k!68GtzO1FxToeGzc}Wm zrDjGVt$NL}J-z`=#rceA0~KfIvSDR7kiV>E%*2nrKDAnSfams@+s%C(Dw!i0PX2If zg;*4A=o@pi@)Rw%UiRUSh;b}4dfo&QD(s3n5=2`2`*CVB8>_3zGaU*2WfrP|OF20? 
zo;y>~?Ec5H`SsfVv!H*g?ng(Hn9!s3M#f zlF$CTq=$z`j>E*)s`U;Lm;K)kdnJaxI~kta%LP>~3%SZ^(w@IQopgaCKbfiNxz0OS zRWE05o*h8V#EsFZb}cv%)8eGXS@dQOt?E|T_P@Ez+gIb^^zq|IWX)J{pQ_ZlE=wcoiV_E93JJnf_8qWF>ywh%yrMcRddy`||IFaHs{z4**@s8A>TC3fxF5-z zia}Uyb77FD++Rt%=p+gyDX-4Zw5}+=lMZ*z===MJ9OLq(kV~B_xPOhTh&a z%as#naOT~g#q02^U+HORX*Dvh(C{-xao7HDIwQha-mE<~quhodi#>Bi@cS>F@|7L1 z&eIaDjV*$m;o9i>bbEN?6Xi#b9_4&M`Nx3kXWwG-{`D+VAt6^P@JtT;7WUf8uOsbR zg7WQ8=)o1bKg**)Z#RAx1$VG=Pde z(~an(X<%S5KU9*2IOd>%m;4RsH*6*UV`O3~Vifb7#SI#$s;FQ`$}G~phR|Cr3=~w6 zNqa8q?E;2to9E7-hkN5W_|-sd*^?e5?7C>q#K|P$`Z%0baX{11CnF-_g3D~zy#n2e z0n)+V=5(AL2O9l=RUy7OskNa&>ct5vl{D!PG=@RM)wy!3KnGXV=2l=$+AQ+3fG3XN ztfGZo=FxtBWXF$;5F`BRE_9tl&xHbP`bVdk+~nlsFUHetF*h)XD9Lx%BF9N>YPA^cGT(>s+1+@D{#actQZtZz z?*$qI#;8CK2d^=gELD#fYM~@E?x2$6F?Z0HT^0uT;X%ek-`TEfX>d~kXydiX=76xU zu#zW@6nms4LIr(YU0ncrVw>paCO8w!Q)m9@j)C`T<2l3iRvtVb`m)=&$Bg=nL9Hk5 zTJxp5)?<8I@?AmXMGzChhN>*%RMeGWni!&UZkhDDzhH$dHYm!XEOXuXY03bw?-=L>>k}=^q4$5u{d11<+HlFD!sit zMuqtr$mBcwt43s{ruXerK@k!9a2EMH3Q6KNM7QNpp@v(hZ}9W?zZdf=ZxC;|^#*Qh z;zuCUUG(Kr({>J5eVZB5S}RZR67T7Tp$faCYt80kRW3n$R1^FBb$zY^0s<4^isH8^ zC@HDn!!;(Da2~_PEI$s#E&F%JUJogPvNvuyFDMJYg-~=+nxF{(Q}?1 zGoapK=sCv>(e=4b;?^xCxGFr_Vy-76lD<3wL8i+ADV-8zJ=H7)mmxO*r`c{9CUpQY zxR=s%NrqK4KzVE|bG;;2G4GKC{7 zP_DJP83X>Am5w!g<_B+Y>FgX#$ArJGRYYaKK18bRQ76R`8(3|t$fjaIpG{Cu3tQ&4 zGEULzf&;uuhMUNNWCUH;_&yf;?Bu8`igGG%CMp$KmP5w2otnt9iD z)SPH*)$Is(6y;tAIFl_OcXy=Pt>_GsxSlmRL|{A{vQgA;y*Twejk%I0#@>HVo#%6& zRia`P)p~J~dSJNJEHW}OBRxI6RUCi4EncYf%hI5y7?MYG=kF#~;O=P`|MLtEd}V9+ z_0MwBz+JFtc)Hxy89xA3hR~Y@xq^(`l)5}?LQd0cJUpF7OiPL=H34o-@xM@o3VT~i z!8WCv`di!EZtpF;R!MK(ym?LKM)Snjm>C1eOegAYAJp{J%uEg&dPPX(C+T%1OM-}7 zRJiIY(M*Sv)@6BA`y@4EU!i_ApX;Jl;P&cgN*)ADZ?49dH+sNlQfaw#1Q!r~qRU=DkiPpP0$E;m&-uy6#0Qc_i~SoID(!*Jo;xzu03><>tLE3uI8 z(K*G%N)iVf;b@^Y^XPQ_YFEsM@f}U<@>sPTRv}(6rL3&%9C7_+?wWfg=SaCtURRe= zfkCZ~Toh-C;zm$Vkn-clZ{WI~kiCzf)ESQ3qi7%#2{)Dxo9afCt$aR^rqQ$SDv$Ni z(TQz%GmniIaVz|?gy0e7bVNENd+cP_(Z((D>o;!j?1;u;$D5{1{qUsS1vo?steYw@ 
z?k)vLsSc1cxxCDc0h<-W#cNs9VoWO4maS_4L-Z?$h)zLGJqYn=$KziFn`{bW(uA4{)>upN$cK8EVArXAbwWH$`7WM@OSAx{~kRIX&^g50aeW4-adFL{X1spXTOfY`W*>{BXta zxQvQQ4-HYq+}u3ogEvF~HXU4SAyi~p3Gip>g%3s;yf3E1O!Xa+{JI9&dU0<4>VCTb zk&~5`l}o4WoA{|O1Ek5H!^EDQl4P4v?~?PZ3Pboh`^P@)P$k@L0Ho(}kjc878VhIjb_%Qp^B``Ov{0MZDBAdKVBM=7qhmRpPr>@ z3!b#e13)ge(IvIJSlStpl(bUpw=!Pqx!JF|RQj0XMR0I1X)PK*bkQjyf8|p-_5jd) z*1mfB7`14!>(YqD=6rwEQaQnSap-p7y2;b0vrzE%OoT}oOmmhu-b-@-dCE)TVZ*Hr zsAB-0$FYHdGC`fCK38?1Mh<#t9^CkZ+_eg|od{>{M!%-v926RCP8xCl$0flK?TDkf zxw&x}){PnrmD4oxY-LFtV8+MCUx$Vc7>9{v0@`2(Uh)#%C#bj#YP1Rq3q5}ZvC3|a zfS+Ob#0aWIi;Dx4b?~No(_kXYM zet$t75gwklw_L3V#nXImR@nXf_xsW@=aF|qCf;ZE`k%(X&}=k!b}EW_ZK?h9PqOF6 zY>K}>3UQum*RCNw5ku(q+FH-g#^C;P8(kl`YD-A?kUlb@Bu$2*V)$XnA(h# z<+N!V`0kiM&9^XU=%)@Pd4HB76BPCLk?aQ>OYuF}jr?5$`TJXYyS&8yuNMe(rbq>H z@bRgYnsvGgRsfzvap`8V$j4?wtb&`wm_p%SLG-O|)3)JQ^Ms(Q&%N_?{Vo*TgOv^w z;3GveH#NRNiWL}hRt;*pYc=qg=)E2SmfTpbUdhFTv&<7Jiem5DHQ%A-fG?n_DWd>n7rcN=5I{FkXG7Z!AvV^GQ0l_4x` zM4`-w+A6HCKvy6^)I$ZbM^BkW?}f8xQw9ojqg$;YCS{<;!$N%m_>FU2(s}vvrO=i2 zE-7kuVPT!^ajzmMQfQzDNKZ%a?Ci`zA=N$l(TN{gf5&E3y5;3%q#0!N*-*~N$Y8&H zTNyHY50SJb04Zi<+-~(g6n{v9G4MNZdHL_Zv)|s`0PoYVwJk)tj*4;5gJds1%PR>%>9Ue?BcgpL5KQd|*kes~s*uea}T;`o_`- zPL5u{mb1TPs`fiFJMgz^a39y}?(QC`@hGJeu+8^csnsmPld6W_!DmYT$AYX4zhGK( z^8cD9{COGm|MTSLaAW_Ujgbz=E!kHfLWIj!UTv{5D0%7x-08V9Ilatch(BT3*Qc0p zRJf(_r5`PrM+paA#L^Fkk!q`?(^8hbB{^U}WC3^`Jn% z>fS2hT*Tku*>B3;$T1prHpv|X1epK+okAyU`gT-o^yY0YSkd0ZfpBhP+G)j7=5cDn zmbCoA=J(!h^Uw0eCM8;rVZrRzoB>9cOas9kk{b5H%;!2=H=CYE3K!2LH;B7Y3*eUe zG^pryzvU%HJAQC-o>qH(W?3w~tj~GRfr@Tmur1K8e?wapj-xSbhd3zP^q%6({DCGZ z@A>ni9`2sm+sXf6RP>%c@5qdgMC<$YY8@Q3XHEWW@s>dO(?A-?KH}))h+B+S-`$Bb zyl^q*AL8=zkHVC+lR2H=|8b$7&vi{8MEy2`eqC}^S)|OU(eyxko0^$PgdH>Ag-bB- z-qvE?HJ94? 
zh`D$GG@bokrrlXwSfuB4e!PCX3pw1)V!+UL#ubLND7C81R)M)7k)%lU*xql>G4iB7 zGh+qpTFi?~+0xw6pRUU?`>`VN$vy=Nnd$X+*QFoq{^<4C^%-Y^e~(1(2(0$XyUF+e z8`soQq}*WT8^W8VvhlTa#k~I~4x7&I_PD3F=peH3B|J^ZB9u`$lOQve_onQ$e6`bu zM9-z7Xf8`Ty~~-b%ibE`ce@09VppFCi~18Cl%)EDBUezl?|d+On@N7Yxi~q2p$eC| zZtaS33H=*|)Lh#HQL2WnPU3#9K%$RKfYEH#Lf~8^bz&9|63HBTMn)-&Rdcr=+P>D1 zT(RACC+%g>t@y4dt!#cGA1E>=GVZ$W%vkmpK4eZmj#5*H`Y4(1k(iSPd1r=ocXN-Z zuBF8Wld%-z6LSM6ZB>+rj5NMhGp~sn3oir7iaQ=V;Jn2KzyoscgOVv6R~{~d=HBJc3;OQB;cNo-|^Y};TD;QL^|J<=%V;YV+nA6QmK($B$+f zMf;{E`-(ko=Lc^VTlEWjq*7uq32RGwJQ8-6Ij^u10OS`pDWFL zgf(#gO)hgA^mRH0+fSC>*-aB_X>V3n!ZXebj(&;^OMxS{_<$Y>0K<+|w)HJu_Ee7wu=eo4t9Uq6Wt)I zy){fOQI(k+vm+|K2-s{7Rlk1m;>Fsv?)A0J?f%4sy^W+n34W1tU>{5WB=Mx8(RjV% z8_SvEFP^tf#g~#Cs{t}El=5rn4oF9|3W4EIB8ES@wLDT}wtNt{w!X&i_|v}j_jswJ zLv58oU}Q!LzR19R<}zl0n&VWr$YTXa-Tw2Oyf@_5@#R~j#GM)4B9E<#5Wn*h@(C9D zkHWQKzk85UQQ-;lnBMSGQeoR0&g0uFInm?fYVZ))Gsm%_g`(T4(z#I46p8Od!Bg8q zk##NSqlJPaeD6vXjNtmZZog>R9OY%IQeS3VeocrD!8DICy z4AYZk*EtY$qnU*jiCdS_{XunM2R`jOQXQS!#4LqJ2!Z5WIyyRbp@Qq4ve9jtH9C}k zZXl2$@1JVwVe7of^q3wZ(OZR~8Ydvs-5+P)tF^gEpB=w=Q&lKrcMVo89kKF?1e3C{8!5i5 zqeUfKB2LWm{@_yu7>+CHgt+HC4j)CJ*fyP~?Nf)?ia^(Hx5#X$!%a!%aQ@!X#Hh>; zU9+5B(M>7p|6oH&m2^uDyRqblFX|dRic+IOA`i-B^+$&R(hF#P{AK^S6i~NoABwgL z{u?h%C9j}X0^nvxmRy~~#~epWUm5Kf?Oo$f6que#LZJY2L;B^3DeWdzuk+jNPmCkj zFQZU2VaT?g7c5+l024t%jP)Hj)(kan-dvB@ zDYr^7IQIN;vr=bPBFkf@8<^eI^_|S85S^k3%$=$`AckS{Ywk;KP8~P{4@HZ)nHDD z=N6S3!lCUs1m$SMNnao*EnXpQzd&1j*-Wt<&&_GJ;LvO>=i+m2yNc=D(0kG#SS`Hw!S=($0mV5kPn9qAxpr6IeURa>**+B1nn&y}CF8eFuC z(`72ITOrOIq(t=QOb0-;`saM;oHB3TH^H{)?yo%=l>eW)7!wbJh_{~Zz{B@oQl$<+ zD#_#EdQWI3FR~T8xw%lzTe@CR{cCe6rBF=dCaZ|%I$r0G+rBq%$GXT$#HFyb)90;p ziJLaPLwmSaS&fFIF=Ht42kn!5DE~ESD3~}N0i^n%#B2thIyJoi2B|J;FH`Qr_;PGg zlAYsqIO6-p0n*bRu8FC|MQg{{b?egZK`uhJYIBx6Tn?Ke67bKh_0DFVmkFQht(O^; zzhnx#(ibp#Psf*EG-=F=^WFIq@b!P4bYks8;3VK;^$>j9&SD4%P~BTP>bk>@V>OL7f@XO0?L zTG<&AJke+`0=3qdpwkEN7$!eW1=rAzMzE5S1+A}c+3%@0tBwv1w}sj>^d*er(N>?t zZ;16C-kT)cTZ^`u)*FQ@@=?(h(E)Gh%w6L-OuD|2py@KSV7Jhzuul7PxKn3VBvOA- 
zy+eq4=Y_vYC-gK1858=;jjg7h&R9;_S+#}2EuKVfF`g^NF5y7z=dKaXceOPb#>Q6A zOEF`D{dE2qTf=Q#0pM;8ufb~BF#|lrbY9lZ zZ-c~&k+PH4qxtXdpV-Etxr}HbioKAdUEzwUc#P$9oqr@Gw5qw7q32iX;*ioP!S&AQ z5=$0@pX5XBUrTn_bPVs;=0@B+x35RZYOjrrQq*mOOD8qCbn>7`2=9gcB#TbX%q&ze z`9`D1>&KW}6C?DC;Ov(YLUhWGjBAzfI@ zr%ZL-h`8{)D4V!EsXItmp9v~5LkD_eHU5y8GSx8EHW=Hui2t7 zG(0?9T9NS@Mu*hOft;X_7IG`wNh{xqa*0#WrqTvCPV)6PWj7CEu+Rx1h3(@te%7JeQdM-dWZ+p=W15rrl^J25`;Ycz{ zauYFCVfRP|JsOiT5%8@!2(QsaUz{N+|~W!v4W}?B!^Vff>;h{OKKxe!)9``kW3Yt zbXOWGYnCi_rX^vVa-i)}WKwhyzuZ6^l47L68NSn@zcSA`uU$s!j_)L~_E1#|;N;SQ zXJ~F}l7VqK-CgVV@QSng400TCqgSCOS9%!Sh%9z?c8G`uM$1(*iC)f=-`+VYP{T#P zwx9oEVWVZ6&~@u$EDwYh<3TmY&!H3YbxWQh3?k)iofTS~(FfrP3HnEJj-arUPh26a zJMPl9Yb^_KJ}9&;WXa08g7CS>VD)2Vr8%;+o7>5pATRt>E${3PNv1VZgTSJ z!Au;rv!G9sUcwlk>*%GAl~v}z`S1P==5Zd>cgyiupQhQc1%oIV&>oaMIDY*2d^f8k z7~{5x!pB{k{`;3XBG6MaQ-=LQ^8*EWFl3Lc>>M86$t^vC;zlYUy<@tqq4#d=#>J;Q zF2Vp&QFQg+(Xla{=cXmqrCarNKHuw~A-n;f?Wmv&jAGShZgO(=Q3;GD>tllENA5_v ztkcati}4%ycWywN-)nn0vZBK6Wi1nIDIyG<@8C{T)o&B|xPyb8nIx;;X?aUxe)!gv zu?#2`G+h`$6w#h2R;>ryDSQKro_(Z3l4#n$vkBPrXSj3}Y20fClsSel@lCG%&4o!2 zcIk2I6wBJ*_pSwjN`^QQln|iN$)vZdQ4SN|*96uku{C^=0Lxby{iMPI;Ho}WIF4eYgwV3Y{SgJblohf zNv*G!?9Wkc8o%N7_~JlI6NRTSUqPu9vG@Q3x7?9AxY(BqY=rngk^n-BKS#V0z zl~(L;NB}KAb-v=lMAu#+`&^7SxjaZ(8@rxinxs*+$G$l~me&$yCD-a|LjtJ+M|P4+ zds1ISsc^LVzUL8CE(_S;DmyTOpb_d4AbO`ICtuwJM)mgX+XyACk*}2kJc5b5qed__AySooDSUGL)$O{WBOD>SrsbA%A@+Ep6En!R`t2o#5In zvF*_`;Q3l041s~_e|t^J8Dux`suP`rZw+)fvTZefWMAMF%qPBvAiVPP7cXcoU(RU` zXSEvsU^3+!qN1wm^6UFOjEcRzeaQiEu5Xi)k`SE&a5x65-XPX^{pytx@F~dC6J$kl zE->F*eUQ(%+Y9qcpj21DRLPmuu?|ZonY@!>IzISA6l|3VDLa_tMaL!>J}* z#^&A%exZrUua5}I3PkJ#DJ=Y{&Gxx+mYTMi*0#1jAgOatGP+BKiEX_{)Lp>(MYvg` z>Kjj^vKzOS2xgB!VU&`R@->FnY;mt{e@#vb+npkHvSDy=P^-c=!G}ol*`v}l6Oh7| zICtOK+XC_(v@bv$jsQsoR9UH6Sa+R|R|5>0K{?~_t@xi$M{nG?(O{$t+`R+XlpgyA z_|6uiB(7-m1drcN@Vn?}GvYRoPjuKf*Q5e2col(gq+Ft?%n#X*pD!>sp!VrX{={5l zrlj=tTJsR-Zj*B*U6{1CdRTRRha@S$$-3^&HpqYd1 z0bMs#5pANkw?y{6bwBnDf57pOT-u3|o^K#n0yfjed3kggNU__e8*XV78Q46!Q!_?? 
ziR?a0j-2z3Zs{*Jk}~d2eP|+l5tVGM_1AgSQ+<~Mki@711({UZ!(iGu&}4Q4Ll(?nERq%6x`S_F@|?X^yT8^7$ww6^EFkU{ z_kd5z1QI@SuFBoRV;EoFt^k~T2Xj~vglFHHZc%tQ7{PVb&}`E z5jM?TpUT8MK|t4Es4pV+`}+%%7Ain#-AETbLcTWdm@C1?6VyOuwqwFL;GG^%i~#H_ z;=Rpm!neOoP!pP7+*Nnb)1?CnsRr?AP{;_?l(z@a#6?EO!8m+SCBY7q1y!2^x-^$A zA(8`TX68BY&WgI7X?pntVfBTnd)B5}nK1&R_AeR&|BCzc;-nn*b3-tZJ0>C`9V7z!a2Fb& z0std{YS*O;5TLzTib$r}^0BMk9LV62WFK0K*cj&)&*ZbN zsGo)+4&fQ&?CNrWc&H_RZju#R%9S8)za95nk*@`L7;F}WlFiAV(7_-K7kX}}#2DLh zw=iUwF7Ke4x}~94DvdQP8{F(>(5qKnNbEvTS^oa@qj9Ucs*2+VmlRB9`X%_%)n@kR z^NOx8o;-eh4wOk6!L(c(oqi-$#Hu+<2B>5fq@l%ve97RaE8%oh@V1@)#cO84E|-rJ zo{R5ouSBE8*4ubJr(?{xcXTdxXxu(6g$PJNpix`Y`xh1;Ml;qO7Y6 zhlu*nRx6+7np;{NzbVD8V|qZw!qqHnY%%-(9@6i6RR3;)#`M>ZhLKt?w-Rbc2r9P1 z{D~3DZT=t&O*H7K8y^pkjXA?tBszK@C`*+}deb=T07u;A4KU!s+_PMtU8< z7Evw9CAM~Uc9J&3BuLqQqF+31c88~i;FrE2xDVBJ4b%X#_()ShAT$x zXmrCiS`#b4C9(Mha(8%ngU7}>FhIY$3T`=qqSKfRy!^mckyH~^KaUfw)p|?8+V-}l zqvHq2yP$1`oP1yVD=u1P#KET4b6jVKV2Qy7;!6#@3q9Xh30My)SxdWuAP0QgZVv2( zaLS-B;)ApoXbhOojA>G28eEM(zq{>sZgayb4EUd%$J*Rr5@43;5O)DVX#^O-wSVR) zD&h^)IT6ZVzS`V5E#)h}Uk13o=Spac1RtGyC&S3z|B;qc>ndh<#gCi^UNUY(9Iy3i zWiRQk@mP0;`sa?=4CuZ;_$g(B#y|&WnE~1~4i1hM--Wz((7J%glpSZ;FDouCt|{u4 zr%})csFon^qhr<)Ut&jnc~lMvsoBX@)h+QKCQHKziIJx{;;;nlRhfzJn-MpQyl?OS zhv_%g6Y<)zhH_}{LlcvPuO7&wcpMBpJ!8@AH+iIB^f`a_oKhy|MGj6*HJ#W^?>8Pt zlAq`u6tN#kjvDzeFw#)1KU02&AQO6p9Ru<#qnu8Yo4?Afhd&I}fh2a0<9w7gnREb} z;>Uj6@flp0^DK|wK0WAiD1j_2t5QNO8qEvkC#GkGP(yD85?8E`JrRBx|+ zaivVi8b<5!e4cBsECmW{HKdwOW0CG_xNZqDiF!1*{syRLg|CQ!*cxHnFY1Cs0&=W4 zrYDKKKSS}D5`{yi`)n_Oq@5i~nSg<{^1@c5Sp_0Eo9|8wL?R4lJEnt_SD+o|d;0Bf zev6(5#$VqkKX~x(!EYHxj|Yfi%8#_pgh;CpwCqZe;xG2KnCLk+71QEI=Yb7eLjQMJZ9Ub{?@l zdusWY*=Ouw*@c=6TtNm`!-N@4W$rX;GIeyz6#3*sx3-XAM>2ttem(QAo6;z@4Csd* zT9<9*K!f>Zl8>L5{O%YUQ*w4yR?KMthHM~X)K-BR+^{a-PbrH;#w4->?=*jH7je9ZQ%}!8`(I-Ce%{C54cC(kD_#WAw^{k@m zxZ69VgFG~=v z=c$F2JPl;Zl%+xUx8>3?1^Nqc%TD=vW#B0=DpPe*Q$KcpPZnEV71*rj$S5AAR2?rXUXY!7d-YXN%5*?ZD)41DFe5FefuC|b zJcM2)ilwa^XJ+Oh@Jj=D|8uFX$aR4Yv{m~^>0b7l{8pH~aO?(Mmug2xhj7_;aVV3x 
z5fV1Xkp)a9XQr&Y9@L;|>-oY#jkQbDo;zFpQMrmywCqn(g<3@`RE1aOP6nz2O?uyk-m>+A^$^-QxB5xtkaY28q5&K6QE< zB_Jn}uf#={*f^EJ-5XLPqub1wT`J0V=%rT}HE=_x2Ka2>xj2oNIUu27s47=o*C>9q(9ix2XS3kb&)t)!%sJezvdy@+&gI^Y@FLXr)331eS5 zTZO{U8oEvT4PTW|_;Wx=?|r$9O!)fm!F6+AWA_ca)61(~VW97_I1W+twhxbt@L2R< zhJ85V%O1Wi@Q@wIvVA%MwP;45%T>9CMv{>^V=h8P>D}*(x`Y8qi6o-sjz58g0k|YG zSDv1{%-pzg7nxp4hLqb5OJfGMSn~r_g|EHvff$eK*L3 z98jzDnJE>cVUxhap&M?M0V#_qlVA@H9TWMM^(+h~o9yD2fe43yTmm!V*&9(>7{Hwi z^?6v|hWlvDkA;euc;JPTr$J=bBXh@n63V`Q*bd z2r|E-DBU72cV|`W>FGaKctMFZ_1=x*@yG zy=&(#+hHr#l4P^mn&@*(oj3^R>sKvG4dPRdOOtaEP2xc%|3oe}_{yl4R#tfDH(F^z~<7aKN&wxDFC`vL%)9sYhS>CiYaiL=^Zczpn?Q?H=|3e zGHWL+@c1vAxK#OefJ3DO8+|JqzjLESfuf>g@(QX>&a)#56EY z)c+&6w|_e1WoB(V<@}WeV>tIq_*Ox!qRE#`wnG|7O`_!8s&KT60!VWZ{p7HN1f9Leq7E4$;R+n)2L6fqKY}B~jr33uU=dh8g?RgH&>yTH7 zFyQb(eV!IMb0f74BO)UMvd8*^3#i>8=VE`(9Yfqw2ZGviW=kKN?khD%)>ro2IQ157 zL!AWy5gBq+&|ingbioxQv%o_&7b#1XPJHLk8#*cK$m?@B>$&MIl@8x|fpDB0d|zV! z@YLd%J{Bp`pdvbyVks@y5Ulk>#4069@p-(v{$SOJu_)YGY$!lhrxduf*)tlM{9MYMpO9+P5=4B2eo{?^@eiSua}g5y?=XCIqpmWl*#Yfj-K>y z0s2a^AS&iv#+M;943s5k>+?Hi56^b@H!TTry!do zYan<6lPrF#ezlM|fjc5+3ZH=ipZ_t8rP0GuyjQ?qEoq^F%@&s8d-vaO903K(L*#L^ zI3XSEh0&y8BLBUWXP>Zto<}!BNa`Y2-`V(I3HQI3nQ(~g>(I*~dHUGO+ z^=_Bj(#?l7G}142x0A_k zR2Qu6UfgH0hc{A!|8rRt9REeyAm4rXPj~84$yo;$R)DW($i-R^rc;mK+Uq8zvwYY- zLZq{D1P!Er?$r1324P4kdSiN*DdW;*Ub&q3-7%!!A|8!AK6?j%B-;fP2sp7(VL~co z8mETj`x~?8uYy$n52uT|ZBY`5<UTp=N-DjOsJr z4?dZ!F${{rn7No--J(Ka7qbC5`GGE?CKmzs*67lx4wMR4ErM&w0&vpl>19*0DE4n$ z1C)14rN|ZM4ej}DFI1n{)os)mxL?S(=th>P0!M8Kk1_}t8MT} zibK4(M_w7pG9yZ2dJqq2Z9i0P`Fs4iSbFKb@Z8ULk9h+O|JNV)=^^(*hgtQCcozj9 z)=39hcJ9hT8pOtbnGaQe0S0k$-ACUa0FIiJSrS!T7W&lzLAIC~5FtpP*5Mk2&#BDX zdw$iGmG9|KM1F>H6Es_xo~>61N`QK4;(MZ$uWv}Ey@C?`2wQ+F;5*rG8zFJ zjzd5WC?LBPkxuVgh1ZJ@9Cf#TT65SiwVl=IBcZeHl11m;A<8e#Gg{yDXN`q&+>#rT ztmSbl@(OeecDsIq9Oak4mz;$y0W>++A-9toYYTAfW=*t!fXPM@Yc`E5>!R}R@ag`| z+%$zF4DF#WWc#?XgP~tXR69_d{pm3>z=Wz#yA$3*6Oia{FowqLxiTpE`aT0ZOEx{k 
z_90r+gadgndlPg6!p=6VK}dU;yfqA%@1#95xv*lzbUtPNm(?huzsQ5@gDi~dSIwgFmRM$)~oo%8|TRT2p4X-Wba^+I8ky}`}A0d-joCsz3K@~=K zx}f`A<0hkGX(72#<{TV z=>?@lgDKOQ+(0=goXvN`pF-ziMPvYZ`PZjY@EB=xzC6gxJ1;t*BEW)}bJ-yXH0 zOTy_jPC~hZ+0TTj&-DE|$-uUHG_3A9bBoDs(s}0j;BDz#JKZ;D&nJJWL^n!*wVM3$!=$4^ z!ZG@;A6})pVo}M{@pZhOzFJXAMXZZefo;Q!{*}!28P0D!T`go%>aU%J2*=5E(VF?} z-ei=#`!p@T^>FskM>o6BL79quiT<3G41AG?rptm?q!7ppD({L6Jg{mVJeuG;yQXN6G_I zpbHOq&ZxGn=<=-&>+8!o#U1#wxJ1 z$AAAv!vOo_*OW6&MLW*#6qdy@2R<{lF{*=m&@li0Ic`?XfKf_L9v`1vFA#ceM% zhNO_#IFh860gmvp(@-=2w@v2pBJYAUkuwoH9$q)jZ8j&6)r?HbYvT1xF6j6@!^TCX zoyP>aT%Rj0-!Idtz^}i3&U%Kk_aokG@rFs3S>&9MuCGoSQ)0Qo`nUHKcy8TnYS4qP%;CT^~iIZ>GEl-gdM{?yNDOJZVl zc$QEYSth5cQ}7D@cRSOS*TnGs_k@V%vXniuqv!{eaDgz2l8j;d`N0%_0)|`_3X4 z1vK}xmUI|`h(%0^q@>#4ON-N~p~tH8w4*h)jFGTxG81=oAAdPPyeL4H;jueN(%C2r zjnfhG%30}J4`By9$i6FgB~u8Wx>V4A}G&?FRtPeavBESE?1UeLhd^8k<;J-l|m;;LBjvC!@eV;T&5l zNNy$jq>biZp{jIA*Eyk~VEE`6M%Omax5oB|IZX-+A-5G5^mHmE^>wghIy+6e18|Gj0Fz!mka8sS2z&CEpczHrqp)cso%fAZZiJDd*w#5K1tCn zuJYBdM~fgF%^iFK3_QF0SRjm#|6;b4Cerwa4v)mey4E#(>^UvsK!us)OA4Fl_Lt*q zsl~Frc``OX3;OVeUn_6oUaW$!mb;+e+AF${2s6UHuicKcd(V<P;W>0z}K4vis&!r)EB*oV7(d@kZzREo! znn!HUWYOkwyH&*)HFNKKm&)cgEn-Uhw3S*XA`&Ss*27nkdtlzTlQNKSEDD%!N3kpX z-o`F+ZoeoNZ1^pNcQ#?*!>$}rZNK|^Wp!X`rcacC$F5QSt+CiKd-0vgU-xU~`#n8U zVcRd3Tc?52+s)cS{$t~~Ubge&OrmGhy^XCL)3~!X+LeT28RI|u`R^q0E??j|X|a#H zS93W;bbtKuh$bpL=|f!qFx~{sV7NjRlc*xu$(Q;7wob%x0ltZE+Wxl74jV}=M1@3X zRyo4WemZT*qKlh&kM}4awb%tbDGj9GTqItc)a+Lj@92g;5vUud3k#CDVyV*~Z#vTS z^I4f^)0#6cnOYXwuaHuF5D)eozDKli0!0|4aeE11TrB0ev7YWejU< ztY?;RZKc`SSby=WT-1+s*M~%CnxkboaFllxLQ@$ZUFIrHxpmBaChTF8&2xc^T;*Rf z2+Fi?43)3AX*HF&b7$UW(L$rQ6QwA{zo=0-7OxCG`5uDPT;0BRE?*TV7Md4Ns-!r0 z9eo0*$$#hn^cFtI-opsrqqq5v zIcCa^&N6FrUwI_f$0OCq%dQbOve%K>?sS`8Y*HXsxyLdpG%Qk<`DV+HL`{m~g~2g? 
z=bw-I6Ue#fwzpI$0>*Ry3jYz!Z>uN@)*ALTmw(yOA-WyKrm)c2?W;+i^19StPSYXh zs***Bod>xqGn0hhSROMw_v|~K2=4o44pj;yc0 z_Pa&NE1S|Eph5g?S)x=tSmbyfhi$oM!}r`;XW&R$pkYl)H?CBgpRDM6H3}M9Rm+Fu zJ8!p3`nC(h7lgXB*;dCyur0_`QeG$rhsR_>l-zgDrLTU#GA1kwd%;WAy=U?LlLgkI z)VfV6O{?XF$7}TL@m0GdgtR-iDl^XPreo9kGA->cnvR>SwLXkI42-~FIB2-8S1N;T zFdoh&3v<3Vs;#v+5wKuicl(@{K&^IuV`dP9_Q5-`tF~jV8?SoTt<|YFe((@db1}Z4 zAwp}$5|coh)AHa`3#Q@)ms{7Q_wSFRqKF$!e{gcR{7ZX0w=3AQqgUg^RUK?_DD_^k zMFPLP4?Vat)y;VwVdjyvtR6ce;_DfpIry<$2s=lgnh5?g-CHqAjCY|Vzza!1&rd_k zZp&PETDaCSeU_zpb1Sdzp%imFDX(qO`%%6cIacSMOkGOdWOBab!x6WGuWM7*+wPCpC#0(a_HtB`~CRdu(gS-KNqvc9Bk0nhuPj zaI4npgZ5Br2my(BjpJR$AH4MEd5q5Zq|Y|(kGs7dbJ8<7ZJ2cUwAax=xgqwNi{*mA zQe)A1vs4}dmm;~LS^uL&yQ8(OlOh%>S(f*_@!QnHqS{svA+7-qWq7O6*?AqIb}TeF3JiKyb*dj{ zC3oiL=Ub<;5O6st%CMH;~R|h=)T-R_q2GF+Xo2 zN4mCWO5s8z-nS}|D~8<*V!2`3TQ;(xUiE5bf~WC1&)kSG!?2cH8*`0jj#N9h^I@nI zxtLN2ad>;AwXF8ZlCWRRP!dh*-|otjqg?o`As zzzbiCEf?NCbSs*S(&xU4){^8d{{2}+H!LPL8GoFye{3y={#Q$>1={u#onO!ylnfa@ zdew!^XbzGCPlYL+MZQK%Z(e1ChQk{{=Y`+sC>@V+s-~d>=T7vT(bir0u!!Jq$*bHL z>-Kg|0pbFG&l4xhn#=Voikc3wU3j*=x6_LEB?+<%rpLQDuT-loq6?(P{t8TU+mjDe z_jI&1HTPj?)QFBZxtGVtE#sD1#56tlFV=W?xk!@g#r>9vh;*<>M>Fqw?r*Jd*x8A$ zJFwL-9kLy-&(Cr<)I@u5ylbw`@%jIf-l3hvjC{|Sl*`x^i+=;Khj!ao%PT5PHvAls zs#y>o$9=Ghtn#cg!z0pnI4&#tU?ra%qXhNgQf{I^=j42UX`s82=hEz-M3ocwR&AW8 zqx+hq+@@Jdo(0c?Lqj660}VCz9fU|qyGk2&x8rQ>FuONZq8TIC_mA`|>T{-RQTmQq zxp|}cE`QT;?f%sLQIkB}@30Ju@sM&)xvV%m7PGx5fbL!h^t5$4F45wk+p4)xFDF(0G+9Fio+H|Vj%^7}%acj-2qZW9(kjP>1!FR*DJy5c7yOL_B74UKADU>IpZX+PIHtv7_(tOXl_}1b6>J|e)KBS0n6o!`uE50D@-d%hT8m1YA5GUmtNTqhs-FR zES6<-XGZm$c z{(y%{))^b$LSzeMhg;!J-TLE2iEV-y&Eci*gjcK!t62KpYqYdZPr54$wcJcbu>V*OIs)m z{#>Hd#+1%DRZ{~mS-9&`f2&q-&H!#zlJNU`e2$e19P5gF0+f>O)?eysMoAcz?795L zcb~u^_u4-7L@JuetF1`M$;QT^EA9_(u4k5~78JSYXdkHw@E?e!w*x;wj^+~OnDC&f{gWuCV)0OSjn^Ru+`! 
zBxm1fi~P0N3Ke;+=w#+Pmq^va_LlbEeoyCd%|na6;^K={qjn_^l&dLkSgpRE_N+-phsJ!4`CYC4V$dxZ-X`Ozm#e3Wi+B}WxaHB8U| z(A8n^3y{4?qktaM%JJbF`^mV6fl?ukiTjtCR1wFwur8ocuJYO#H##yTnc_~ zLF1nAjk_dfzsVBgx}tRB`k?fxv#R361>R){-7elHb1YmsnW9nDT*Sa`qf^XRoc`Bjl^3jR<{r^?l2@{t-*D10$-NIQ-hFBTlI%B?=$^lF=yz>S~%!q^kQw`Xqc z?ud6u$HY+_cNfNSyWJab?0oaLWSLI118p(PA2ZwgTMhhzwUhvwQ;p>8a@H-c)8;5Aow>idps!&!BRpU|j4YpJLwxG^6M9E>&|AN3{)6wV4 zjqon+G;dX0d#eEqR0xLA4%=4pJ{}q^(R2q1njQ5!YOdOU{zOyOT4-2OlSUVKp}6Vpve(;a;X-$6y5$6&wdCDW}cge<(%M5N~ zst~ut7t#W;`JX2gqs}s*(%(HKj5;nH&CV`rERjR>+EepE2(mMJpXnW*t-HL}OZ$(D z3o{EcCehw*+?|<7MPiXBz)xPP=*r)2s5jNqI>cLVeKfyeDbKZN z=ft+fT(UL?;nQW|S-k$)^4|?%eG%o@5ZZ4*)sNld<&Ovnhshi3HV6KA#1rIJE$6pm zZe>tpCSC`Pol-WJhuX1D_&Wv%m9lCY@mw+Q9%HKsn#Pvd?4&rpRh+)XkSm4q727yI zMOi$(5t#bCZ?r|#^rlSa_p?9*krq3-GuzRcbNxw6qzVd?xk zNy_=xxv`26#?C@v7t#+FUfIp#^>>;}2ej_2i}vPP3!IB-J2Q7K2P%8+N4Yy}e=Bnz zs@^+nowexSwki9f_9rJ_tFdvY_4i6!fH}V=<=$H;yeC7B>az?XQPKP`(dFTxE&4Hw zhVIG$oDtTifvv78o?4qZA_A<6HB4Dd!g7HzoeR>$^UI5Iy#i2LgK{xTw@xZah1I%* zG6BbSob@P8AU=fg?Ic_8v;2Uo0VGopoj9;D^o1DPIOHvM(ftNbPUe9p+NI-bd)<*2W+m<%(D<8e#_ClBjty> z8o|-gF+KV(rR{I_$q&w}AVr02sLjEJ3{<0vbR~;G53K*9N2}CKw_{mf2H|BDMwYS+ zW_bF0h}X0o74a~B9}8|6Y)gL8FIP~)yti%{X9W9{;k?5RHyhcZcT;_{Imh*1b}+P0 zVJy3arf;FebSa~9__Qw|1)ql!CHEl^X>tvK>!Lpy2Oq|u$}1Vcx&n8DlV$Fh#%hZj zc7w-3P=c}_#lBX2=zT6Y>1a53t0PD{O{{Tlh$O#dvi-G~Ct8x&Pk7Y3(qy_B__)E! 
zs^E-siIAZ+EaLQwcl}q1&{R($MIR!GH4oZMUi-OBIDM$`j3|fyNY5xZ;KAce0Je z6l(vN#_;_mt8W$C)sLO#Za|engw}yl7A(52dYNW=V>@!8ZNBRgaj44CndjP|wXV(% z(fSDg`!>#yxjf53TmjQ=$3CqrCk~VNN&)Am!P`;|`;zWqF0Dg9R-Yewl$TXz)D%`O zkmg)G@CZ91hi!vv%B}tv!8K(k<_CuPJ<|c^XZ{>b;~hkDFr67P89EuYZQ49M>@p*o zAu`%M>d&ch!=q!9pmE=+oBk}o$bb@s*eUEkJnP=8WwYV?`=(%+oX zsOe%j0z}I&n%)#RcFLub-}|E*;cO3ZuMVov>WS{$22^qpkaOTGG{>;`vcA$EuTXxi}bNf!yUUJczGMsj&$!VHwSU zn{eMYewJ)}t)s~`r(gOPTiA6^xHVESaWXxYeaEYYmuVJTzil3IF-Z|idQ3_k_|!o2 zLXoxpS75EYzwXAqV$W;0`bT=vvlMM|;yi$ETd#0-=|>7tBV(k=)M)fkBHU1=4}Z{H z@X%oKaffPH6ne%Yq-pF>iyJ1be7R{*@2q1ZG#qZiF!{ZJgP(IgMhuFNVzV?`!@)O1OPUO)b1 zXA)0q6+T}1t&TjqLBPH+n7x4xN*eXYgqm}kytKaYL4?r&+2p`v!Z3OVI}; z?k*i<%}sg5wyhzV(K9Ao0yssSDv!78dHk~7ZD}AN(I~(@_=xABVmmqW$xry?R*D9h z&g^K~YY_pP#JZZA?q|i5i~8L82OnyV7JXFLPHL1yIy+>3`9JF|l5b(1J@>CIr`(8Q zMAmziJh58M;jfvEZ$WqAjBIW|q@CF}(7|5MhV$iRiep+_B&7loCqGC4N_dwtQavli z54v7gjt=RV+zoRQNX(UnnhG<%IWiBVe!DrYfWR}(O?+?ATu|C1s7ipPJqoBEvps%QNUw2S79BGrUf3YO-3#{QQ@ zR{I98(a}Q;=@Xtx>S&Y$ts!hqb4JcR48sF+s-HYJ3}`GqAX#OzxLA1opcsDSUOJTL zNL_X~p-1Fza7Qsw?P#DLeQM+<&$hXlnWXk-E@|)!$W>=n&nE_fbn>Frenn5U^%8cs zZJ(?Cmi_rLxJcjInM9eKZQfqTsoVH9?sgHY8H@yQSGtt_koM?^{os=|8RR5+r}Zd5C%&ATp&0MJJ{Q_ z-Pk&kKLLKxjk!2x9@{gD68nR8{(nuD>#7+sB)yg|$43OAd1`hB2(|nPBI(6cuNDeI zYLdHlNr9-OgQTfZdOj83v^Pc9^r#GDTBwU9=wDmii!zDsd1M%N!_Zu@)6goHf!u#6 zC03fR*kW8f_&Q}ZLaPK1HLSBSl_;}#qH*6tS)L!iibm|fky1fKb+%Oq*DEprS`JBR zp_LxmZV#BvXZ!zlEz`-k+OO(sgwlGby*cg6YVH6QDDmL;0XNNTuBtORI&!cXcS8RR z@Y-seHaQ`Ui=>TxCp>!3DMLcyGcKMlA+DWHaQJ$Pq;S(S(4(s|BIh+@Jh!%xrPk3N zoOHG!10}2hL!g-1J-PIZYD#2c@gOVxat1zZ zuJ-a1r0EE+?Zu_;(Iw6@5dhMf^e1Oi8@$FAbcV`ge8Vpo!qI0B?byKlWoSdHU_YvI z9z1`~6+lROi{ghupKX(|%&iJsl>@KSw%r_rlATQidgl;!V!*R$_$f)@GSkEx-DypF zJNjQ^C}50rZ@lkgE+(!;8l-gx?wS4+eTWTsErfcNtzoVgur+{U(4ND~NdrDA1PuYb zGY<8q{qR5!P2F&B{%5Bi5Ch+b!!4YSkG8`rAg;Zp;{fGWRWGtFY*tIN_d z+z=9B2|qH`DXl;4!JJqa7>e5>zz_0Ch5ld}%^`aWq<0qRuNG=!y%Fg#K^ntJCqedU zh}))?DTn%J9;Y!^?9f*fhrW9N4SsCJDQPJFPtXJGi*%%K{iQ~_@2s+sO(*=}I}~RJ 
zgt#>x_<)~TNN~+7CUD|~-g=am$zKqTp_n{TKz)%u*6{-2Xg}lIka6LYBGyDC01jmC z4j@h7XZ1UwD-Z}H0x0-%x@kKQVmeg(3&0e<**}+2lN$)@9yCxry=<@xw;~WZN+9Z{2ZHc6g((sd9RzSE_v03VN-0LV zK#NNebW<0w#J0dV9N038Grq(>J|&g{y?i&K#fHdj2|$2ve;gbybAXrol8ep-v}h3nbJHmr6}pA62}rpF znRA%$r9RQ;6u@>%1>zLsbp?<$NB~|JiSwxED=s{f1gUR_KHS@15&TBH3CwTOL{UN$ zuxg_GPJoSrbzek?q21K;0>B%^4*}#aUiBj2t$@Ne9rPU&*7@4BlWCw2H(E^sbgj3D z!#Xa2oRc2#e%l224`J;eL36ns%vnCd^ZzaWixQJ20}!-nr-9PIv0aOf!)o^dxtC$f zri(nHU`;Ar=*fn@bcGa)@51IXH!{x%Mj%#Vd1h{ANV$gc;hmn52Wby0FiVVL+%9Wh z0n8LhhCsp2773(RSKtzjX5Rq+1)z^3L`|2?G$_+sw_X4Vp**lygB;+?0`KOKoe|B) z2sZfG0vAx%@|4m7p@z{fBjVS&=4Q+L4nL_g0K0Q)qE4TZrHWus-40~{bJ>5RIdIuq z_>i6W2Y~$)TlV$IXvAJuS7ri7ExgenJUf}&p3k0nF&d(2qybuiK_LMCa4vUNJ@DD? z>j0)&=({!{Rij*UvS0$hxMJ&&0HFJTDjNWV238IZC8bM#!wsXpC>dhp)EEe(5)ejU zuv9&TmhoVX>g-na&C^4!R6qL<)lmB%X9;!ZNhSND!78>Cz|WfBAo`}ijc|$oUVk=& zFjkWGJf^593W=(W>XPfWHyS5IaJZ=F!R*;X2|$#GgaFxoeT+|*ugN@C@!mFpaw#Hp z?0~8*HfVAY>$1Q0v<=^zw5hjEF$SAkJa{Vc6m*jD-d>mq1&*Tf6x=XWXd4F#i~w;@ z>6Lp$6b2lUW4RcBQ+I(a9&xwN>{&oR8lQfJW4QRMrLDU`QH>hF3niui67ldSgCh|9 zXf1YX=n>$G$#md3280?8=RdQRZgy_%?7vf}1s2Wb7agIwTG;lhy}e7E4**=3zk?N+ z3V47}QgB$kPZ88P(e}wL9{W%@rw}M;RZckqToRlHte`|~r%^<#YeE)V1sfOI$IZQi z1wnQ1vmw^z9l{ZeAk6_^StbC!${ImK?$_`mY2QE7itqq!`UHGC zy3me?K&P&gAy{2;WZ&sm1J8l0*G&xos!*_rCf+%G*?xEwL#{9X)6DKu0iyH*wAI6QYMvUK0Rf zevAy=5S#_nUC!UU<=`akt&;{}Fj8lLa-bXF2RQ(O^rB|~?qBfr%q*5H?*hnxZnlSY z)YU#jF9yrv#(McCsbAztj0-;g%q|76sb08idLuYjN*;%oR?1H9>hGD$<@43AiF ziBBVPP#NrqUL*+%FbC;r9CqpC1L~q!5hC$3PDsfFB_jqw5Q0qyC?;rI7=juC#FIDz ziUlrkFM31iDgJuVWvKcWHjHu(EpevRn?0(%Hthh+^`{W+c=mxZn?e}L_2C#fOT)@% zx49V`5Me4t&Tw{s7){P=TKCHvt+SN{wF@ z0Is?!KH=uXLGb1K+jbeiEE|6jITG=v_U01E$n9FrBIjWX=rs;kN~=?|Lex^Ss}K0| z+5R*@u*&p8zNKZ(Qye4&IPNlXWCKD4h;tMm8jCD8$#~uB@`UI&-#=$+W2UaZe8Ts^ z`hhrIT3j){cn|SVJgPPs(F35p`c|%W%qSM=@JH_y!G4}o3l)e!9Z~c$9-1{j`rb+e z&ht`ZEpR=ii87_h-0}~GB^&T=E2g~mH@(0Li|gS z{x!J$c$*-c4sM}&80f-iR7lV>QqMz`rsw>7fM*=!aL$Vt#db6cx2kmLfZ za2(s<$b(wp3f#e=_`Fk0(v2uz;#zV$4Nr&^D=sXMoC*PK-~9{7tNeIl=_wCM1Au-r zY+uoWE4o~a+mWVoN%7 
z1AZ8=x(+-COMc3W@nBRMNJXW}-Gc#a2d4M>AKAudz{3ziU4N>dp^3n9FKjk|Xm{}O z_aSt`b_`q&0 ziok{3!+}ip30p5hUNGnxaR$YSVDjbLVyXp$?M8RCH!PG;er7G zF7Al}Kta+#3;^R#6agjpZ(FTEPibhDz5uwbKWC>lf&D%if_)`{e5dw0*ivLP2;3cx zkF0#mfPghklyZ@A?d#f01UTjudE3oBi8@{46)l+haR?4EPkm3hB2c1;eCTX0WS??( z?l~DRg$BXe#FItiP-1dsLZ{F%LCGs%u&)-PW%&{Nqw7e=#5RgVe)gcMuYCXM>0{kD z|A1xpV{W+v{t~|X=6h|}E5L>$m`qSnpn!lZGO`x^F%F3?>mFP$kAN`{uRy#6lX=Q^ z^4N37kY07N)vE7HoPz20%bmgyrE|x)iHH)MQaq#58Zb*EnILd8Fcl~E4zMmf!6#%b zE2TDGtT-PM@{d15Fu8F1_!egZ8gVWbH{ve=B)u2#iPXlfLeZxl$i5^wO%Uk_0VEx= zq#q6(PX;2b{_g?J&8=Mtl;O{VEh8Ko(83rEpDM=J+_GT%{!zm1yUx3pEwEgcd z#c1 zb`lzsjT#F)9#2D2R62f!pi*ByR(H|?bp?zTOzjyE%ho7>qL4d65|8S?pBHF;=LNqE z2x9@4Exj;=l@dBwFBC2Ts(72s;Ak)bf>6ZpixGPqL;)}9^`2ZGfC5^AK12=>6Z-~= z17@)1-{BS5YrLgSheFZbJ{#n1CK<;NJ)n4y6{~}5Aa>h{0#lbo2gVKf_u9=BfJOyY z*BwjYdRnl`2m)!eGVn~Kg0Ei2j=d3{gcK%VlUtG8|D9U3UW^oNTo5FVPaBae&8iJh zjYPjQqfXgRyt*D;nsz%`O*lg%LkPY#4OSxdXO$b}HRKC!`yz)W2A~O{!#UwVaa6AD zWplNs9}KdAX%%6i9}Y;rtNxubE+5$no!U zd_QspHlKIG!0T|T$_usZ2Ry)PRPI^^6(BZ^6PZN~9GKGbUt#YRGV_DJrz)5Dw4#^< zL`neNy)QcYrlJ|L5g-rwUu<@7?H+~}1hIY*$c*h5Tow?L1>a#LmVST_he&kqE}y)_ z98(Cjr0%*z&x2dBD=^?gIQTjd@hK)-rWeYGCc_4cU%v?)p=B?CIVXbLl-l@cLlSW- z$RG|G?JuorDOs(yCf(n{_AgxoNT|$;3;y|FYGHGP9|y(Ea*tPvYmjRLu$_R(1yXI3 zk9VLg57`a}B=&d;fq-)%2`FNfZ2^)HfArcQ8R;0pjlKy9`GhzULS0_28@Lf>@cS1Q zqy)%F3EIFoUDpJa+1h1aYTK>}fyD z&RGF}2`k&y4#a30G-6~*5AetZmk~7ayF4ZjE=+^$#s-F!a$1DKYfeIl6JRoWoFU;qfyk;OY8uQ3wC?<`^(ZthB)0>$)ICT=QrS+WWo>-h5pO{(8bPxuR@4{O+MYxXqXt3QEy5k z7GfC%BQxSR-d>t#61zB_C;4BhuRv3B?t!uAnMdUnTE3p z8^4UOCzR(gdl;L0vhMXQBIDzv&ML&7jE}jCiprK~lOLPA+`V%r^!L*XH@MF~U)9L) z@{RZMIz#^oONfv#EMb>!K=$m}g;BGkw5z<+^Rr^*&H7$5DibhTl?uhfzD&h_?~A(gne%jttxKF!6DDj~z=nSf zcr>PZ)*l%(5)wAC(tG(me?L%d&*Fo*^?kI?e{MPID}^X^i=yN_c8#igt+0>~_27Qq z%<*{df@1Gk!iO5|>RjmxiDyvSo63>VB0|)QX((o@n^PS(H&I3d8T>U|wqK(~s23Hp zV@7b@x)-vkX=ozg_k@HByhHI`Wt^1ik>FEi4wwo0%ukgdzB|<{Nhl8_z$^+=d+iM-?3WZyha-Gd@m-BVJDqvWldd<6c~KSOI&^$XUTA);lxJ6p$4J z={di#kuR|^Mkz7AgqV#mDC^sqvJbp@djc$H^K+cJ__mn@O3eWTM;!-;emT${ldLe 
zSUh3jjrT!ACzX-=o)YZc6P*<(GmX-i4-$lP@8J;6z#;s?R@pj`%Ti*{X9&Mn=5(iU z`oq@@CnMQI+gWNjMGdtbI2L$?7dB4ggv_y(Z!3Lxh%&yg@dP|4J@;l)zTm}qrOdD} zzdq#GsJqjVZPgo-Zd#Upe4us~m^9Pm7DdQH6&J9Xsi>yX*t4Enc*>O<2%m;GcY`&X z8P{e#(+SEx3<$uv%+yc}xPS0=y*#-vCmzC%-xhE;m`Nc!L=L4)X$7w%TCBM&O&w+W zo3)%^w;?R*L)ER*4Qx$@4IC*&d#IchjJe0QzPZE3<^LSB_Iz}Pf1z}J@pMCi zvWUs9HJ0EHV-eNT9MG#$eVv$kC-y=56^xiU6-)(WPu#^`yqL+=BEdHCq}s6D!#hW) zUiAQ86Rn(y50v5`5(i7Pajo;$XD&2Lc3%AsA1nuQlc(NaIELa|wSZ`kFzDZk!u=UI ztoMHC+u-cV7HYG>=ISx9k@B9Y-OuyCM++|$lvQ#Es0~Nyp`rvQIHD>}xmMi5eua#} z^z083v zFAQ)O7G%~~*Bv9?%Wx*oVh_i*4mRoe7qN&V3e_5FF5B719G`w^80{MIp`e5;`qb@B zld+HxtIx#=Dw_`C<1cROEyNU&8M)tnF(qNJv~s$|K>f+<_w&e((2I&}@J^$2%E_0? zH_l#hW(t-xwZn2G%$G(^im%)XL#5VdjvH@W`5sQVsm5gF??Ji#k%LZx?h8;*Dyj%3 zI=YW=4p=f8=dAJxf&9A1GH3)xf3S)O5s4=9)90wL8gjkYmZ=d}d2?~@5lD9l5xLyg z_&0mcK{z&tV0I32S}#g08$EK=eQSps{cOToK;~z1*o{kp{5#)s@M4@QU?YyGdv3f6E;V$q_`zG_h9|`D1BK)IoPuqm?M}-1zb-; z>NU+J_&msQ1WzK5s&rTwFm|vUg-(NO{;K^P$l8=s#h6eqF2x7QlaYnBhRUIG2lFbx zMy&YzW#Vl|-V0OYshZRG$)^jZ^JvZy<tHc@u#R!l99E*1ncLoUfqVN2QZ@e zJrMqn+Nr@b1sbgyH(nUUuv5h{IXMYB;Cr3UW_Z|sb&SHV{zoXCyhR1?Y)ezq1sD_o z|Me7Dq4!{HGMD4Z2%lwtANcf5*g;g5Y8E?;uBO76xDSY2s*4wzzp`<5mYt^^TMg~O zXFgm)^WcHZv^wt1KT6peH^-y7%G@Rk5NRHjMRuln@<^1cXy-g+bUkS6pzEUS?hSTh zC;Gfei?v0vmBaLCiqeNLPxuS7hHt%2`K&q4r5m-tCog7Y z^#ItQwO2;Ud4ac!-s*wPz-G`sp2E9Chr5_xlPxoWcNJYqZRhO-pRRx z<4XCbqM}?bSH)Zq@d(F=OLL^r$2hj(^sco3M>3V(LJi10T@l zGT;*2nt{g|!tCWsw|I9;hRdub&tT(TPBQLJxz4PDYK8BWj8dr3+`Bq|Bnl$qPp2;Qe41ncK6l65n4KyS1ebv zI!`tGMdag;ut~sr>CFa9jBN(9hPHw}= z<)}XO8b+P|7+U+to}b+<&{;C)=j(f>cDQaaWfA+#8I%S0!Ch|dNElCYg_P8#v0GGL z>L#Dl+N*^E5GT|Xa`H=pdV16_%pu{K&*jI)T{E|Ks24#+E_lthgh#;~uwL^>b<~OL zwn0MT%JGyevb3Y@a#^rtze%rv@gb|SU;vv=<9Ux@Dzd~>&n9x| z4p;<~dX>Xo%A$OZCgrVLu@<9w+I8m%2>!rijvsAp%O!#G`8vXH-n`k}oemrX`Tf%# zqq{co9gL1@kr7sHEkPQNBi547lE<;$L{*{d>I3cp~P^YlIeMcaz?Q3r8-;DO^@7s zWRS!#WfJY`ieYV6B)d+SFIqAh|3Zj$bV?wY#bYpDm||qaQXr&G)^5J*UC?Ua%@HV`H922iOwL=K{%v>%MG%#mrvnW7H5;HzS8ChJol+aE`d4Hm?^W 
z@iQW#sdZ1+d{@-waMv2<$+v)E7J}J4Z0*%}n8)qrJr(@621oS;dkoIYN5`DNPfH;t zG;^9Pl_}oJmzUdC(MKX7I56$q^Jb940+!Py9*Qx)`JQT-p6dSPZ~XlvP$1I&T_qJFdJaG-$ga z;K9qFl=TV*6T$2fpW~y0@on96I5@2O&7o>Jt7Fv%*zOO?oYr*~(3ZVwU7BGzH^j#d zojD!%d7(|I>zqEsO=jv(-OenATb_+qnkIEDD>h%lV32PrGU@dNLk@Fe-t^d${uztq z^S3=2#nC@#h~ZUFOc)Fj61w#PF|ZQD>`N0H^$ffF2Xe)Ibq0-1O3h(Rjkwb2rUvg} zE2fAL>boB=64C!Iw>2E|JaI?CP@fQ&1MnG1v8r64-H(U4T``QP<>AuCwl;s@LXR6N zHXYx{*VWN+S}yK48m(Z1Z8d=?Jtgp;>gwAL>sl7f7Cp?{=cWCZ z{rnCDyhk9WK_xAE8r{HQP|m5E#q?!M@V~>17m8XK^rXo~^Ed{;jGkm>@ClSK*azO= zuKnQMK;9o9+hq>Rq%}1)j+JT=CNQQ2_1Ne)+&j#rc_yN2Ogc%gUc0*ON<&4ZQwOsJ zu?3eK*jCbse3;g@wh*6Ww>-qwhIov&|M3_PhPwJ|14(gHKT&=Wubct>#*9LL{V7=}5EGZa3?>fAbawY-iXHR|r}cP-ODyXm^&|_; zV2lII0lEnhonf5&7rH>6SG6cG#5Y*=o=47tWNUZC@|lfBsw-4`9C6#uwSEINGB#-W zdWW6;3mA#$FHCAW#i}>i&2H$od|_h?Q2}q9-k)$8)$%9z!D%=>lKadx&}wc*1*RA- zS!7k253|F`cQbZ#?#wMYM@MzxoO{(2*Mh|78?WqEia(6>poiniDh3@0DqGivYG0d; z0ywkwOshcY7hT$bLhU*nNL{}^(0-Z{z0V2$f37X6(Gq?1J~h=a(jNSo4!C1DRIm5= zWCiw%N>b6B-;pS8lFPzy>gUI|@7`fGJ+g0)Eib*cyebmq9Q|44Hk< zUjMN1Ul8#WHeHMsyr4dZK`Oh6IVIl`tLmRrZrkHu#sM0<^~d7@pLrZDU?9WK_$S_J zaP02|G}YC~v2iZNs}ve=@Yxr+?QI+$AFNivoR!uXo^bfvoRrJ@#o?2&`nq0*uV5^NgjPB; zkhE<;`yd2@zeq}t-PaI-!wY`1UcnFGY(KIjyRLCbUdSx0{=ucXr7Kjy)n=o4F=K(- zjg9?2?|3}=FRevA|0MCMAwCz?rEyk%MoOfLH_};vY2xFp<(s?5C`h9}eE7vz=CpD( zP|1X#_AncL^(#ks)5o8;^!htUR;yXxa}^#>t*-<=H0o(VGPiP;1uSN&`6OsF8qWIS zJ5Ci5FPpiKVvM-14bh${3tvRH64o^uruYR`JMS@>ctnlAoho3ka`oUgGgXM z5%=hwu#lNQ^7Rb`&P^oftl^@2z=I4?XTDb35E~Boa&aBqseh@i-O;=%2arDP-}CH0(fN>qZ5$-6q^&B{y?P$dBKS!giN{cle-&a35-dJRdM zyh5smcZ1n_AnZ1aLB8Ike~vF&z!uArk|m>8{@_|r>&(oIP`86W>gC#^&S^ODD~9^s zy#gC;o0CfpWycbKYp* zV2p|y@wk&*8udZXABqfVYtOkZ)xDauj64TrvS13PxH#T}N`tzE3Z($fEZ^Sd5Ze13 zLH$%Jm1f?EL0*Z3$NX5ycI9$iAdl6I?Mc&u!KPG~Q)j2!u)gvvvLJ1PNS)y@O8Z9o z5t=OR=p*L?xY2@e`E2}0GAbQ;@PC9~+AmzQgE{HM9s8`kgATG`ts8ze4(Dm_%(#5DEis#NPyzO8$MD|FivVc_DHfIs&cPFHyDw0 z$SfX1OJ!huq=$zhKFR;|_3w>;!K!Z_q{B>5niu-%2Ht9JIfUhX`Gj&Y=-O~R)XBWn zSZObnbsqn0sq0~+3doY%zIdHc&MWQL-=0ENkdUA1snXHfCpq4nTwJoH#!N^x4o`BJ 
zhNPAwd+lEN>%;iob%-E(>b5qMO; z#kAfo-KoNHR@HklV#kXOi$FE7Ni&i!1w5(_&*{hMmZeSDa&llV1_dF;OH<0FN7;I+ z{u8uDhr?fDm}OqC!oo*T)iy;caLLwDIGmN7g^-b4Ugd$tAi|(wNS1y+NjCQE%BV?K z5t^aQ<~i!dsW&M`D(FH#t(lG=Ib$4qxx+x-X9S?;n*`E;YK=(-5ecq_V(wHtW@Vrw z;+moLj#aAC-C_a4CgugUO#fUo%byPQSEC#Lr-d==m$2H%5_2P)t3gZY+aZBx*7}!( zsqaxE-!s#PypIeW$yR!A88=94iUDDeXIXh==fcN1RBP-$Fk{2MD3OjvXKJHWJdsO*e@hZPaJV(Gwf+diKUk88 zUiC8+P`^L8%I1UnPV_HnAOsn;}O1;`9jK=m0V!4EVYBN?mJg9UCA%%+xSUUBAX@4>i3N0Ds z{jc^XCJm7QZlUL&CsGtV{!pEViChmLKOK1kqOn|Af;ACNhX#uak9q=EBKb2SC#xyu zFkxKeCp%cbhni!tvcJ?Csd@n{}^sUGtjxnRC&p5PvB@ zIX@+vOVgjEyn&pIn~XhiivGdEC7<5%Wc^cqeCpY~sD!Z8t(|qCAnj*N@Yw7NkTSl! zV%WU(%W6`9x|mB@FF(`;A$8~GC&Yf1O~PFKxv!4S31$mw5o^<3EDOhTEjWH)t&m3H zl;Dz>%c4ImBEW}le8_Q|^*K_qcd@{R{f@-xdQVhUwk_GLP-^_TTqNKhul}<3@I~yB zDmSsea$gOG>=U(KvnXCOVNASE2$uj`-||^3?L{4(XhJ5}i7PxKv_K1njSbeWTp>A! zOw%XxU~V_;MY19x_2Tt3n=d$WYmAgmpnJnt;GG&*oovxXw|Nhn2k=N^K<#>w{21(r z+_OL{vXjByr-GOlAIx9a)B=qW@v4c*xEs=)){NjNw^n-ah0Qkd<2xjJTB0_cR&run zin=BJR%q*uKSC#bD=991!RBB^y1V>5>WZu^udokx#_6_*py%_&?Xvy{j=fhg$JHA@ zAJAy&?jHUz|5~&u)hUY5>m=f07vRn8by3@T4knyXdG5q!7g$#}8D8QV1>ata3M2Mx7tZbuBJDTRR$%$R^(cJy{Tj+FebSryhC4hLMN$e1;#xm`px*Lp z@^=>DAmE6OHz?OgdS$!P6XlzU8)2L2X&q{+R+>|)32SFiLc^zu<^xV{{Bt-yfQ{#v zPY7W=n;ye5T5}zfc=7>shf%#|+@n{GDFtGgMhz$Is1|K{V*jK1<`K}WYWZ@XY0J5& z4ExfE@jteE9yfJtC=~U@6Kdpg95z2fE!%io19Ku}upo87gPZ&ib+Na^CshSiza@55 z2hO$p=f>)Y|8hUhHZ?u}Ub`X-2RxY`C-JRb$Uo6BI*UWuO~jes^`C1_s!el5$hVyR zftR^L=BM8xTODf^M@5ob?seESc8%XUT*CLVcn9R>5{5%ub9_%h<%9vH-Rat1 zDdz5OA%olCVikG4s3IH{u}fFCE2kKf;qPU360&+(RAAd(U;8IW zNw)^&8*npAa7DhUE#|m!><MuCcs7(~zj%Q<`KL z0)=#haJ{XEQI#^jlY02Ef%jPfh@alq3zzoRM(Zk*B$(#*6J;>CfJN50vNr<3&haj3 z%d|a*%J{jsxMmrqkC5u}mqM;mbA{n!M|?IfnAGGUBP&bT?L+KGh)QqS_8M|WDr~Dj zLi-PZlbiK09X{s`^8=*)YW0PJh6e~SNnbuad@WXq1LdXQS@-4zcbR%(=?*}g;PGrR zn$z2hkI5Ji>*m$gvSo7VXA*Hd1>fL!?st3TY4CfZf@IY4A&uCHsA8S2V!^0goefOu zy>JTrDBay+P?IP+FxH&nI(e~_FuO48kR2bnS>yaTgb`Iw+FWm~mhx_@4m}5$C~3Jz z`(Ah)j#f&W#s|%J-?%R>2L~6baJp$lix7e%p2R~NjepK3YUPX)!a0Oi3E_YXBR2Cx 
zndB(EthO0NAUW*ogl8F@aM|_t;Xq*4;s+V3VadpX2J1S@V zh||NQa_hqcD5L}LW-Pr4Hx+N1(q9&oBsNcqrKK6BVtzuz4F@?vR2*z3h3i43e}x#b z(sp$Aq)VRQ;rUI-GCLv5eA;_&Qdo(I1@WWmsBs6(;>LzC}a=getzBpt9s@ zWEHcpvC+Qn|NcD-Ar@|WE)b>myb3E|${dAADHGD`ojJFnvjfs#8I%tkG)HMFxJZo;LCaEf-q zAo7I#z2E1WWUzJe&y06Z_plDM|CT}VkGq!k^!8P{r#hZBh$gni3pQSd%{kj#)-X)L zySqF3e6HK^kw5XrPsqBOjg_A*$&JZ-1ze!FSHbW8?=8c=F+E$DXB1`1T6M14Up%w}A?m#%248dpb36M~OV>5jshSl^L zefy|bNvvr`TC2EWBX099-w>)Fv6#7~O>It%8QEJE<;uraJw3J&yGdEFRLHN-?jqe0ZwmiUjh0!kr*Euy z(%cmBcrf1xHxfnqdh0LR#T^I_W-9lBDL>Afem(h)UF3KR+2iZaN-Tzc=x=T}p?h8ymln^| zz1P|@*t$Zt3AVuxgni#B-^^chKW$oJ<2q{pAw;r=<*MUMe%ZO{OWk`99pJ^;X}vOW zUL4$yC;S`RxD9q+U*Ap1B+^y%-v+())kR!F#Lj2lYt4EBNni=16Nv za=LJ0a8s)HJRwIh_>RS? zkr0yDy0OD1xs8hZ4bVcg%fFA3VqIn^AsBpw{*__n-S^B?LN;=(Ez$DS^x?_}kk3rD zVy2Vjo!7ayo#`sPV%+kju=O0X4mF{VuAid%+&`HYV>>VzyT+G5tXh*g@RED*i6KQr z6*2DMPp}_Q&dII3)>{cT+wHqFn7J+y2{PkwsA2(j zWbWLVT*%MGMKb*C3MzzJ)7h)(JaFmho<0PPsh$o4d;g>0Gw=e}lKhp!BySrR&zz+h z!qko9RheCLJV^(Er4V6Q(J9?OAzjAfe8m=Qd`RHo8D!&Z&xv7KMf@wLY-7ort zhuKp1Vd=d2V=(Q6ob^7Dr;~gQncB^z89ZO}l~`ZB$EA@b@_>YrS?|y`8*Yb}MpSEz z-w^A|0)MdPvI42IOq1bf$f>x@CsIYM1TN4%U<$UV9PLz#R?ShH^&z{XyQ~@Xa9+0k z+8E0Nw?!;5?^7)C>*Q@5x89Qs6(e+u9$z2c#l^fpB)jQ(9fp~Rpy}~Bm#n<}OuKZ#Dea=lX9$?VX)%m0zfAFDbI-!~Mm|;oZ{7!;*Koi8R-d`m# z@h13+Zc_f6^+#a&Fk#fcC5$n|&ieIGP!F9y8>g*f@4`tBj0Y?xpffrYgJNeiPI zZGc)lM!lOkW6{482@?MQPW}ULR2Qw5q4LOPH8s~}YOhYOO^`enjoFDHgkDA}Mb2#M zLXP-8vXTR%B0t|x)sbkBx8AySO&%*SUtZPr`np#BGw&pF zz>?kZbtddjPC z41YP8krUf|Mbkc2kZ^~F?GIgyq*pMO3&&w+2}8`vLJw6*!R_4zrxZX`Pr^oSV&qF^ z-xG9VcY@$2?IQGzR7wy6*D2YChRjFnyvWA#-2Wk(C6wc~&C=XTySJf`45=H4m8a7T zCB9Ea0F<7L0RNl^EkFc1xZ2V#qFW_hw1|7zHYE9(`(5AP&C3$evF((*s!+}Z`x86M zsj@%hlVx2GAqzaW;jrW^^tB*|$Afa-DvwhnanJO`J%irmUIr4_vi31Fn{9 z!&aN?6HTno)PSd5&ff(M6bbLO=aD7lHfTBJUn)|u3j5og*$}zoZsi;ArqqW?kOQeA zpU-!40wWW&h&0xr-PJV>sil-hlp05GG5hYvunHc>)v)b{%S%7$#?ekq(;zFn`FJ=_ z!JM!_S9;>6)*mqW`uj^|b0rXpAUu3_goC+%WJy+aSRh{BQC$o$W>4*licL)^N{t(i zn{X;F7&kr0aQI?h=;08ti3b29jI8-7%1U4rXxFNC!wBEbG&@_D7p{%vDPvX9rsVkz 
ziq>{%s9|aOnLA0WEsoOEQRp8cZ{p|f#)7`R?g+(>_cAit1dR4(pq*8Ms~r!)tP_Xr zDM~%PGF_<3$FuJlUQ;*S$5p+dW2DigO{HT34Zo&j&zgx*5g+gmh30p4VggAf!E^Ktq7%)0Fj0P zzrp)u(g+hT{Vwi`=%UoMBe!NGMyt5^Z}F%tv(80E2UM>`1WsW21Xnye5ttH7;|Bw2 z&4GF)O0CK%dRN56#C}FeN{ZXhcA{9e@HN8CZFj6EP=El0N}m@&{c>Zr zs#57^!IghCbC<;*nxcH1-liF+5J4hPM?`MF-`%7}5XE-o$`yX&8{R_|tjhV9A}Ski zP$XL-@)0b_k)HY$O%%S~IbD7e9Bf776x-HwLV`g5ecYH{3NXJG;SNGnZj}-Ww z6YT76DE}X1WUR*Wg^1d4f&7aBnq`xgfH%pz`5eF8jq1|}*#H72pypD)i*luLYlaV? zv==Y-Qg`?D1@t>>In5pJDfL!N&uwtXEp9)(Yl|7eX@`wCyQ~^Nq?sfay*%9XQikW^ zc~K|yv|DqLqY-s~dTGzE(d3d(BS~8M)qaW+_jE2$P}p57_Mncl9%~lVaF#r&^6^~p ztHX7tlWr!^0S6lJ>x!~m{YgfW4J7OOYqK(C_Ifs*nj({$QIpYgX(#Z0m;*md1P+Nx+i*C?fZTndZF%OlOmudp$;);_%^1 zcvC%xYT^-6w#|gW@SjhW;lh3$E*lY4r|+~XOH*Wu;yAa_p1pqv3->|#H67=jue+I; zn2s(bB_!p}(?JIB;%;ryPUA1z9cSbu`hs1?_{?#~j6o~0c2D4obrt#etqN&V zP2?_bIlp7(dfxrpBy|l9yZ#Wvk9X2e-iW`c50qnetfirMQ~d5XBvyg!tR#14hhIk+*|~FXC7m=UE{!OhesmgXnKR#u_fFN9Ce4)06ovrqDOcsV zn%h6LQDAdQ%W-N4!N~#Bx)->yG!XS@@YfSCGj(}R4qLZw9l!4Ym}^(SDbV>Kl_!RU(3|P6 zvPsK5cJbvIi{797#ioxT!Dmn;CmB>O4wTp8oShy?w;Yq&x9cY3zzR?5qHePm-2J1O zX0t0y8Yvqq>7R? 
zaxGqO>|gP(-tPd6S{ij?+bl&k=i6)B#Yvj(Z7 zdNWB=9=sbN)=D2_hw5!c8z~-x;SrnEz47tdmdAB*im6~SC-Ucyojxfh9W}oeGF4R* zE@DjwW_+0clegAO75pAp#o>XwGp{)x=>u&Fi=g8>Z#&d$MD6alltyCRArr- zg>GK2oI4-Ju70luzxO0PfVPFw=j)%XmqyFy$6B`70KTfk} zL`xFHO9w0p^EBxIqu5V+)12>+tlj(ey~mm7H*G({7Pk^6n)Yal*Fb*##Z_wVuHoL2 zi^o3D@U1&Beo{N}%+JHEH5}k|3~SEC27?-N_>wOd=*xCdc4TiXq>cstS}+reu5kDc?1`2iQX zsEX!XZPH@aGUY#mil`ynYfOK&*z~WfnC2seM|M0~AJO0fmw@H8lk_CdS4h`mJpq zR4k9lZP-%*!Z;4YI&(cPlZxqyYc^YlVw9ZU&DVcYuSp$I%6n?J)E%ctc!s+rABCf2 zJX5U5M5%Hns_8dkL?i&0c#M7>6?ruH=FVXIw}h8vOC*aV*psIeQ=wB5uZhXu0)lDyC`bPP5{l-qd zoDP{-$NA&1xA&H8XGGggVR9Td))qcVuB@!gjHP$ff|pq-Pd4ki05Cjs+49|m7r_D# z>9FE?Nn`FmBZd^O>ZtB3Bo*i+w_2~YnLKc6>=4_yCAs`{q6YCO($UNx^-L5lGa;FH z#;L@c-?T+#n+=7z5upLACZc@u&HllWpK{E0M(|lnxJMS`CvW^)|`3b?rUpDr?(=#HwlWtN4 zdm>-vd!h*pGBCvQ=v+#QEA`PAm9rLH=#U_i&;ccAlT%aVP6;Or`h{TYLiz7qDD`5n zTVKi!JHM`VHINg4Zd^zsFg2q{IXZh=ED>I(TTo2Re-PsHJBJ89FzAl zMYU)d8CGOHoHbtCx+Y4l*Yc79%@DPD59p=kY@Y?R1o^%RE&vN_H=10^YrP9-qssb) z^NuquhTEyRD>Lf`hT8I&25L0<;ncdd{>pV&c%+pN=#^n&V05%{Bq{#@OU<1e+QKuz zb8W_vCbiGt>va$HAw1r7rYl{MjN^t_)k7yo$H9*Ljfd%M%)TMZ`^K-xd|)1x%=fq) zme%WD8YSnXm18?q-=hlSc~Uj85_@b^7rVd1dF3?hb>74g0((@55UGrkjKXvC88@C; zhb>*fk!}sW^VUs+Lp|dRiA}NHv0uA<+P`J30lxUsY;Mvm36~QjAgW^Xa`choGb{}; zNrW$eu--F0@slNuPkOlzJ<@8Sq$o4q?0!-9pq+`k zuy1v>!u^lcM9$2CCr@nhBCF~YC99%b*2{oEpLadLz{u_$AAd?wVu>Y8&=f$fuu8(6 zxhLP)n1_hH{BYsL9+GiWN*{phz#widiK=PT9b=u&`d#UXJ;+4ujnuTacwMMwk~N#^ zI7uN&LLLnTLf}F{pxp(y8)_%7&zxsQwP{+!1kme}ZHr)}~_qvQ!aO z-@eJ7k*1#Ar?n0~MkSpHGW~*sjVySfdzm=m7UR_4p6224Ns?UkqK>i+*lWm5r0)C% zoQwU$MO%{>ol?i#-GYyqu8VnYm~$eXW89J9m%i+kF)}s$>TnsQ_$z8|XIHiM2v5RP zAdGH2EGQ_bX+Ly=gnxbD&n=K#U(~f8Z9ESp{|R0fcrs505wC&f+@h@0@l8(m_FjtL zHMR74gW#s#wDPU~zRaZGkfK z^WON%P+ZhOTG|kU+ZQfuJudCpIS}Pif}V=;wrsZ3-%QW} za6YtWl#GnbalXk=E=*v*qM~A?<8)bN({&VFsn~UF+U-Y92Xfy2fnVXc9xXL|V?@xn z@f=iDu$UdM!?&@%u^?%SRT+zzoE#69 zLQFk(>x9daz{N<1FiFG8^uWy(7VHxVd1SCp=rZdt{K88?18);6}@7M5c-)4!1EiX2=6?zT2}qFf-++k|uv$B-j6& z2%)`>>N(By_jpgbSqi^W#h%Rb8g>h9He`GE_7ptXMliGB?DkM`%6K!1+fEfz+rdwr 
zuwQ|f-%};np$Erlv@!V^@c(Xv$v(`N1`#G>rP;8nlnd92l5czN%A|YcwqMwKPzY~u z*Q@#Fghs-aH|JwCzt?m$^y=w&9PBw5%rkh36iL@YO>Lu!%8i~2&u<<_{zqbHU)XW3 z_0v@6?OIb^cS zhO}>~^K7$ZT?vuzqWH5-eKb;=hDd2enlyWex6Jz#*0?U@(Uh!c?N^;rlVaIxXK%+l z>UWIDgo5fq>Ev1UYx^_EjSo&tVVkakFp*RUE>2BYK8Wuo5_wIY-nLU!121?&Z&6cqrM`kL zn7`@nCX@p?SsD&9&sjE+1Zm#*#ey<|&U#>hX#goSJ!>%v3RgWqW6oN#1JWG2MmCyE zGbeS~yjciRuoPc@kmJ%MS8mky&O$+nE?`+9;j&VU&-Zld*q{{qqchTU3bi#UO^^5B zkmb~beApbU3(Br=+oA0E14mOKS7f@d`-*>pdg#ZI>z;J{fq_3#9 zV%)w*Vnbl?yNh4E$p@uIW)#p3)z4egoL9Vpf`tuH8K2{?F} zAJ02_lDC<-xbhb)F$po^6IuXXhMEIg$M9p1yHGmfEmLE3zLVGAIFgW;Ujh~C|57!O~hQoMY z17A(fiMh(Z2{d?(btg2*{@xuK`HaaD=L1`XhfNO z`<^|dx0&qwXiBUVi9jn+@clYcD|>p|C8>3|_`eqBG`DYGo|$Tinws(N$Utm)T!P_M zwrh6N6zZMGHw%nR{8gio?B0tU==O7ni)o>Dp8KCPA^ZxvYiH?i8kVHU%B`(l&fCxt z87UN)Rw*<^m;weV{5RApFiv zW`16;=D_ye`GJV_m|FriJ5;%~^he+O00s-jfg3N-84TA+=5bwM=t^D4HvqCt>YbeF z`tUCIUdwI1n=B(NB`414{!%50o&CnmHJ*s9JKp=@9j-ST7WGBDkClY%cQ5hj`CGcV z6Nr;vYSRFepq?@0))@-#1OI*CG0LW=nM|d_u$Nn(@PF>R67@FWL5O| zkA3i%$>)gKeKNQPfsX|}^hA}A#nJFjT5^WpM~e~Fb&Yn*xh6gwT9OsSYV(Um3eoJp zb}1!Lo96lHTg8hMS{BQTv?MfV%47GheY&&5ug={~4+(7zot6>?WA#k;Ic*oq`9GO| ztTJbI)PA!(1?2GZxwrZ59G80YXP16_LoML(7RCJPzP&+Z$P9lZo4EFY=#M(+L!_Kj za#3F3nVT8Na}OY!NPVsLSQvEj~i`_Ej+Ic3voi{dhlB=jQIUa44g+08BW}y-Ql+`?Tm@SwpI&8rmQbvW00tcB(IeAL2n6~eDt~AN{d1Uh|6`<5 zyseE(_?$#3P|xV~XD;JEcesqvi%GcbIeEVBR8$4Y%-THmUbeJYUAiCFD^9?h(|1@z z1t$zzN+JmN85WCzp3mpSh}jrQT~J(I zbvF4wkeB-R$3@T8`$9WB4xNELX+?Q3xE%e-=9 z-lk>N#NBDmj|mk7>E`J>HJ1F*$MKA@oM8K;6z|Iw$iduHUpA@yd@b$O4UdMDo(YXx z3HAIV8tNMCDqoLYy08aT8)6g%yZYyUUc5R~z+Ipi*X+?q=9KrviC*=gfl(9%sNlog zriwfBBp(MV@;9t}l@Y+b>195kXlVMZ(ib22!7TY{Mn*=kzlv1KE6ATKA%8g=GC&^=oa?FI569@i zVt=JJG*l^Us$N!Oun;Q#Re@=0O<%1~WeZGP;Y%$Rbrl3s6L%%-rAm_4jog$XR-?x! zeRCuBNfvvWIvcEW?^Ra2FPtr;9J79$|6U6^;vaPR6}CVMx{U~h`+35E&#-^>&jKr4 zt$+XgE7fL@2DW_Xjy=0V!j+^d-_}si7v4|f9jl;4MxPAtwm=2j)^5=Q7rW-SDkX

fx(E&I<|Be?R^GC<5zISbQV(PSH2qpAJ2| zja$07CB*84qiRk=oiqkzXQt!;T>m55>(3c23Yu?{0>@_O-1H~VqoQ2)4 z^9+5BT|Kub6NXHppt~{url9cbNn*we#OtlHQ&A$Zc-i|3J16swgCTsi|J1qxEYT2` z2(=136Z$rl{$0K3k4{#jOkcw0_}`K;uBzYP5+b(l`e*OV9Zo8u=sdm_$@7{f7T_+@%^p$UYc00r5FXBN!Wc+$Cgh>SzdOh)_F^Xam}cvVFPiIeo9FkZkUvl{#F=-gjH}0dun&?E~Xi)}5zXKZLcn7yo_1eBvn? z*=ZEzbWmWHl`-N$NCY@7;W8{U!sTy{K~p-|9JxC& z?=;$!=bhU!Z`t45q}JXJ4v6{K3yy0nk~^NwD1VXJGEIFT`mWDb6gxgGSWt)t1;6>( zEg8f!=P`7o-R6)?UiG6+lctl%g*)Cd$u-ub(@O0l$V%Zh6Tsdm|Z`}kP@DJ zR!U@sbzQwWzMVNhGKqeec|QX!n8Wb3R)Sv4=;!eQIa^ca)1FjL5I|SBBO^ z@%6*<3zGl@4*p7Dh`<}~Xnkm9bal+PFFX278EcQ)8%<~vMNxE?RR2AtsvMNovvkkEl zoH%KB-eQjHBF~9-DCA_?B{W%Cf;MXJO0LwBZXEG>xo@ags?@Ce=N07h(b-ZLzE84h zh`ysHD$}e*7jHR{YTs%{K-}1Tj)Mo~;A68ZG1FkSNs1Y_RIkY;NGm+G$_@QvBY~!J zYYgOIF+L42#yK?z^iI}Im`eXI0TffC%VuR|NpUXj_o>dPM|EtxH?U zPWpR;I_nIo3qq>%O<{xU5od|s$%-egLAxR&8Ex(DI=lMZdGlG zW<6q;aK^wHp5KD`TrF)DR+jQyo(}e%b}>TZT>ZRDzXl$R9`0%N^QexwJ#Ok55rZd1Gb z$E{nQKd1U&`$@;2{YH5Ahga9XN>PZ3-LR$1WC9Y5LOcpmT$XU)xHK{nM1@@GAY zJ=ENvr#|VO5>O-MgG@pj0+`LFv3o}lh943Bry2{5IclA+^OqTzMNY11tr6}=lj~LG zL)dAz$zNrxfja1DR%zI^iUVF^g*Cu)-`&GD>AnT%umLnb&KhRy+N7Q zea#H%a}(y)bIUh4R8%gRQk2;)J01%`72{wxrJK6~*Q>ZgKXs#Z-Q8auV-@KynQr{L z;pLv0EFrMw6r+=2!PEWig%zU7YwsMfLC0a87*7VZR*0Q3<&v*Fq}*uy;;Z2ZH5Aej zlHow_g^VC?HgpUxF0I5mFAbDm!N`r?@-Pn!%KI50y5wLzY|cQ*P&fsr+PRkn!;=RZ zyC{!{J_<(?$QNy{H^0!R%hSnEIVKDPl|z8BkAIRnAL8uRhdrdb@jd z{aiOYFed&3ufY-iIsprPq-!`JUa*9`^tOP_rQtF0Trm#DJO0svcRs%Vs)SAj)feWO z58#9!X||*>q&CD%7&CLL@5RM^B{kI<8JXQ(Ly;dSm9Qv6F7wO-3Q+^ALo?x`R*{x* ze))vlEb&o8lDZSq2fBxSSM81|lAhRE_=vKNkO)TB_Ri<0TV7 zh4_rK5yN{7aD7=8mzXQ}zMUI7<6WQZd#WN-V4i}J zS2+|UXPCNUG9zR0kBeGDeQ&Y{vZg??4jcQN^n0WuQmMxZSLz@&YFJ( zb*A?cS5cEWP*PXn#~a|=68hoHzGqhlxHsb1or9r8D6nNP+i00p3>`yk4IjYl{&g`- z(u3(_{&-Gvb?a(M->x^{w3(AGQ`nv5B~Di~a}0*g6`a`@`t{+*ORpqV{B_=4+VnD%ig%Np4jkj& z6BQgR<9pWqJ+Y+5$lRTtbXJ?BNrPd${6u^{9l^n)hW2u$&h;=M{Kk9IHZ-KAea9?k5f z5Xb&X#+pb4xyP^(v%$J{`8m)siPKag=rysozP(40TM?}G@sHOerVszTc<%1B-ecgv z7X9T{s!5VLIbzBlK_-kQ=|_IDoj8Fw^s2F(`t+w%!R71H8s4&}b=X`K*M*0hU#|xU 
z242$emea7wt|Xlz=T?d;lLJ~OU)rpwD=)`566?PND^lss#h@A33%f)b@}CM-|2Ph zp4yf9aSJ-H=PfP|6bQy~I}1BaWUZO%=rI5G;lU9>1I}=i%VlXwEAh~eKiQ*?$GGSH zrep2v`0XR;?3I-~T9T#Kmxu9ePZ6SnI{S3VoSK?iT|JyAlXUBSFDv6%$}T2jW7|u` z#>PqYMM$_yZ^-ogEZC;^>TbVtv5g3I&YA)KJ_GB zcV}3{mhsV1&(t73&{GSJ9fKc@xU@2S?cas{k$8+fU(%A)g2f|d8HBJc6#lIWIN~fk zIkRxWrd@q|KSaO%+}4?X*$5vc7zSVwZ4L7niX*j-*N(i1KJn_>S1BL<3Q9^!VW(A2 z2!6-lX=i`Gt~g;dpCc8N_1mD2{qj6h?no&{y>6?eqTOG3bY07EL?06opCYUVx!uz< z@;cpHFmXIl=s~|pEJ4h#*K3((ae-V|%y|AML<$X}MQw|WVf_xv`sPC>l*p*ai_5pH zNjqHJP#2!sJ6X6EJ^$P5{x641j-Xn(+~{@_$;{(?cryW8 z)RTd(Nc-5xY*&YAUYm4!KbJN_uUAzaZt&+;Ro!aQVxlI5^^QCYs9H5iy{C4m0tt5t zrl@l|j?wPAn%}N6{6?3c3`iMiU|neqi&lY1w+P^W^R@fE^4$Npuwgs~nozqU3hB8`YkDUnG>QM}8*|6^XttMa z%+JmjwUq8vO!Ds&4777NLEWwqo(w4+9erMG(X;aUb}NU*@y|COOdF3D_0l%iOPzAR zIU-+wLWgE&$!n^US6*%@dF{eRTQ+vXgfsPyA71!_+&o#Fe^al5LfONc;Spx$3hU#& zqn_E>YWJVC6xQ`58dkQ@qXP(H*c*ww&!4;}=E>$m^=e-_$A`QI7@FVX0a3If$Cxa* zU`0};?>U&&C=+O2n(IdYyEoCKjCxXm!!bMSQ|_DN0G)B2|M?m9jU{HG{I4I4|LcFE z6XL%=Izh;?qty35KiNcg(|>;&PyFZh{&U$PE>!sc`-{xhSJ0U7-=E~GNWK2|7v27U z@Rb#7?Z40L>+b2{Vlr09t=0DTx-`Lp0kK+ zA1&{88=D2Wmb{X-&BtAk9RG%%<<^>Q%{_Ad;5#sP>rj@>ghHOJKT z;=~~#!~F~cm2tY^Xa95g`d{YNbY9ls3=drxBK~_v|LX=V+ib}Hw_WuamE+qK(Ekq- zkg~FM5lGq4-+1ct=g-^P+FkC>Gnz{=Pi5uu$645_XxIBT~Dl@jip#ZapA+16vk2jf+f48YucR!jFn6}_wX(Lnx|ee?pJfLX48Gro;UcpgCb zL~X|Pn!ny#ovz>}h74kMRO@UWaGj%u)#3L7F^0~dXk7o(moH~R&pvn=7Df-^`$tWU zx0cIV07iZO16UjAvYl!R_G^>i0M-zzmG4YUO2Zc=8~tXkPt>?NRQrKg>`T(4rl7|% zZKs&i9yG)PP{brgpy`WP?hwq!BPehT105P40I5^KogZ#u7jW_huTDcsfTyDd_Sze} z7N@2OT2jtQUF{9UN(%t6Fvbp8EopDY*Tl43y0fzB?bkJHOmpX`@Lk8xoOuHZzzYKe zwOW2V%#7(5nb$kaMCkt+p*^yaD~!k7$0V^DI|8J!2)Ka}$P%hJ-%0_1x|kCBBr1vt zkWe%Ro)Y*2W|JmckKLi;H@a&rRd#oNT3C5p6Z9?E$5KEpA3w6E_QIc}R6ml-bU5=ks=b%bat zzDELcJX3X>sSCZV#+v_R=1Uue2Ix%+s?4chdxq8%G53;8Pm2NRTx$VYm=n0 z)^!P2?L1>Hd$-%Hk1`)S_MKfbC!=e8+yv~_39BV6y(g3$4I1T3&@M2Z5doTo_VIPJ z4ICE`5U^STqPowcSO=$9P?@perrw`*FIv75A*ba>q56mzr91MR_7~W8y`ZjbV-pf; z+AfWk~jC6ir+sk7K^gNX^;u?|TZAb(wg<@<&iOfr2vIcZjSW}oit 
z;P0Jx-qfOKUyCA{Wo9>4P;tC^p)OQtU4`j7MS~4Pn@PWwWv35 z?p`=q#yLNW^{+aWl%un_yi7NdyAZjB29GTu>Z|u9I5;?r%LU83uYgb?sX{(_fmkJ4 zKvOXkOiWP*OVD4hJ3H@gJ0oDyYNRBxqL+U9S|a#=;2i8XKi>+idw!bg>hS09m;pDR z`0a15gAJMh-k8{-ZGt#o4)dd3cpln7@$B2*jA>oTnPM#ms#X3~;jpsvIwDMi(RE1R z5>M#Waz(g(B7Z|-e0=;<1^wCkQs6CbWm=rrb>PU)$TR5aiYp5kfQYVfBsm~ z6s{dvLf6OYT!R*xC|v6k)X=%j`%BF<_r>4)`rtzsS61TGCc1SR*w$VNJwN-P{*3dg ztpp|)hecMg%zrN|?1`-qwI1DF@b02(3a^)5^#2TfL}*pVSraD6jRUuHcIjwL)0h$Z zFszD-i+{hrLdi*WHPj0CPmE&3aL<3}=^ZKrL$ZCQb#ng}ShVNZspmU0*x1;V2XQMx z1wKri(7~A?VQ1yjx%8XbR&`#!c!BYn@;Gd@2@mQ&)9$6>Bw85!IdAh`WP%XRueT4S z(NR;EVzy+>C*`=wg@rhSs!+kTLhpo%!5+uS&hrv$v&zviF=Pi0#9r}Z6%=1nNg8kw z8(fbRv-8DNV;>NUcfJ|h)wKwfj|d+h%xzM)H?2NmZ>m7_!vNvWvK|W^FAQug7fg$a^B z#z4!^aH*6Z5fV&4*OFUeLM*xx(L>Ps$jSZSiN-u9QS>fcX=s@>d5Go}Xbxp?ypPrz zl!%e1_nf&PT5wo8q4M*RO8E=NyguaTOG2Hb=U$^@5nPnSm}Z>U+@S(aeazrfkQPfk zT{gNF!GG_d*$nv-IKO;{h4FiVXHDC~G26N6hU9loiE5DAik9#ALCHyb(tO8sK;Aj? z)a=UEjxs+sKeU1^jJKHzvzz%h%si`gNL`w34;RKisH1;MI(u2tvZNiV1MwFNq(Lf7 z;9)SIp+e}Kd?e1dM3!Czo73 z)3!{QJcoH>!o)%4;vxGmEU_QThW}x+KOV57p(nJ<#H%mDt7lD&iFPXE8w=lp4f;?# zX?UJO5udT0?0huhFgti2qKE#WPLO7wwellzt1u{AVFiJIEzD&!U6*DUKmmx)di2)( zc-vJKl|wl4-5{+l{$$1r5dE zAGG593OO#GfwE=g=SSnM@002%N%8_*w<*K?IFMygJgh#(yAs3E(UCC&B^4H~#Oo{W zRP8USM$C0~Sr?9xejRP}JKCVshm+T&RfyO;z|l=#?@=mBuJJ<~)YGaTxZC(ed$cvv z4}9uPzIDD^-?$&|+DQyS7+*fsr&3>eh#v2^oF8k#&{F6~Mq=o*O&9^QqNPIl%iZ&! z`F8K$--o`ZF6fPc+7DMjQnH0aDsS%utcV%1dk~~lhVYq+=uU!pkA(xeedkW!Ew+{7;6aPJmccpBZ|5d3#5}S+jL%}bSwShFV40q7hi<{91D`|rc1xSLd^ zS2q|K8NIY=!i6ng!ds!FqLRkJ6SAAutDkt&dQ;k~A#bUl9&Us@&OKO6$NI77&wkFI zz7}ae%mKUL+>l-q#LYBWBNwSw)tm|Q9NN;BkdYz8=rGEIait|h0}0^_DK1PjrLtk? zbf7C19!c`GRVC-tndd|e96!$35U5wO&M0c_xpO}qMxO4#uupl+79C&C1EwvuGMf5X zGZ(H_eQQcXNC5H-`Xq_{b@CS|Ngz;sFk|9a_eTIx^Pt%+Oo{1 zx{F0M?-Jnw;m$FNze0Yc;laFtxDa2r`NJ#H6Q;JdLi_3XI|=iLvjnjnj^u0V9+^`} z>9(>;?TKxeksP;iNkX#%EV?w{_MuvoD@? 
zpA74Ue!7b^L_q{D1E*$=kcM|9nCLH-0}42DFI`i?o5mXiT2&KabV=9V z%lNEh<2{1@FGL`IjY8DqN6?GoY2APznUt>%31VEO5oDe zo|d$meosM1cb`;(znZX2O3}P<;VHzCr?9Q9tr4vO!_x>uXfdLg2Q?8-P`Ytk`DR^D zlWit6d(c#H?^~*I6YsV&@TGL&{CwcjO3`!IPQK-tWi=x4tEV~3Q>6N`H{;o-&z|jv z%b<&Av~g^OnWqOreI?xyEqd5LSGePm<2FA6_5YY_W?Lut`1|{tw7^?V*+K&+!sTsg znic%%jRc&|op)h;`vTL=zc$R|zh)F~CV9eYcRb34nk-pkB%;x<74d5LF%O`Gci#2q z)@mPR_jQNAtWPT8s}a*B@=tx*+S+I^=D{m9bbuWP2VdiTl?zj|!Uqkw?F{tZL)hzc zcqK>4@0iPVr@g(s1?}fY?~$ajW2q9sdZBPY=|GjT-}u+x96idZJ&oeo>odQDd30}~ z(TzKAks0kk@AS%iKi^4v1r_$|^KCvXN?`vyy`aha^Urqv0WDwk>4ekrtoHBdOT86A z-baKjCaXm!-aC9oe?Z)NniE0`e&fbBu*{Tnbh78pk#Il6pkhQ;xRimiIE{w7da#f= zGgNB~T5yOLi4aN-jXwlMkJ037e>6i$gy}@{0nu}E&+7*C>mU*plxzy`(f0V&xXBle zI?sc8PDdxFrQ!G_#zFIk8nF2YE-f)gA8)m&M!eXVta~}Dba`RSWqF8AVs-K&BvlIR zwwaBMA-X7tj$4pOdB`>Jr}h~WzaJNAzXzp3+#1KVw+;J^gr}$5Dk{&7b)MUsQ z6f2}e+!IkU-T$+^ygWA#j}F76BOIVUMpV?~8$(4%+tb-~aH@qogONrk>8Yw4~GbziS=vHW4@Aj$2;fWiP<1H!iOTTH5sy zxbtJl?z_N-9q$1NyPAq!rNE`SKY$0u_kwz@K<~K$cX9LpW9VL4+10yt>-JV$cwDpm z`m6f-x4?7Fdw?TEhk(s?L*VY+MBp9`Dd73{Cct5h6wsyu$~%t4Rk_fBI+4kT`t*{(WH5*#CB4cWp;kml3cNm)6qKA^{312Zx4_ zH

" ] @@ -631,38 +633,10 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": null, "id": "bb873616-fc3d-4e97-aa8c-1498dcb00952", "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Last updated: Fri Apr 19 2024\n", - "\n", - "Python implementation: CPython\n", - "Python version : 3.12.3\n", - "IPython version : 8.23.0\n", - "\n", - "pyhgf : 0.0.16\n", - "jax : 0.4.26\n", - "jaxlib: 0.4.26\n", - "\n", - "jax : 0.4.26\n", - "seaborn : 0.13.2\n", - "sys : 3.12.3 | packaged by conda-forge | (main, Apr 15 2024, 18:38:13) [GCC 12.3.0]\n", - "numpy : 1.26.0\n", - "arviz : 0.18.0\n", - "matplotlib: 3.8.4\n", - "pytensor : 2.20.0\n", - "pymc : 5.13.1\n", - "\n", - "Watermark: 2.4.3\n", - "\n" - ] - } - ], + "outputs": [], "source": [ "%load_ext watermark\n", "%watermark -n -u -v -iv -w -p pyhgf,jax,jaxlib" @@ -687,9 +661,6 @@ "interpreter": { "hash": "40d3a090f54c6569ab1632332b64b2c03c39dcf918b08424e98f38b5ae0af88f" }, - "jupytext": { - "formats": "ipynb,md:myst" - }, "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", diff --git a/docs/source/notebooks/3-Multilevel_HGF.md b/docs/source/notebooks/3-Multilevel_HGF.md deleted file mode 100644 index 0ff8b0dc2..000000000 --- a/docs/source/notebooks/3-Multilevel_HGF.md +++ /dev/null @@ -1,242 +0,0 @@ ---- -jupytext: - formats: ipynb,md:myst - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.16.1 -kernelspec: - display_name: Python 3 (ipykernel) - language: python - name: python3 ---- - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -(multilevel_hgf)= -# Embeding Hierarchical Gaussian Filters in a multilevel Bayesian model - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ilabcode/pyhgf/blob/master/docs/source/notebooks/3-Multilevel_HGF.ipynb) - 
-```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' -tags: [hide-cell] ---- -%%capture -import sys - -if 'google.colab' in sys.modules: - !pip install pyhgf watermark -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' -tags: [hide-cell] ---- -import arviz as az -import jax.numpy as jnp -import matplotlib.pyplot as plt -import numpy as np -import pymc as pm -import seaborn as sns -from numpy import loadtxt -from pyhgf import load_data -from pyhgf.distribution import HGFDistribution, hgf_logp -from pyhgf.model import HGF -from pyhgf.response import binary_softmax_inverse_temperature -import pytensor.tensor as tt -``` - -```{code-cell} ipython3 -np.random.seed(123) -``` - -In the previous tutorials, we discussed using binary, categorical and continuous Hierarchical Gaussian Filters (HGF) with different levels of volatility. By creating HGF networks this way, we were simulating computations occurring at the agent level (i.e. both the observations and actions were made by one agent, and we estimated the posterior density distribution of parameters for that agent). However, many situations in experimental neuroscience and computational psychiatry will require us to go one step further and to make inferences at the population level, therefore fitting many models at the same time and estimating the density distribution of hyper-priors (see for example case studies from {cite:p}`2014:lee`). - -Luckily, we already have all the components in place to do that. We already used Bayesian networks in the previous sections when we were inferring the distribution of some parameters. Here, we only had one agent (i.e. one participant), and therefore did not need any hyperprior. We need to extend this approach a bit, and explicitly state that we want to fit many models (participants) simultaneously, and draw the values of some parameters from a hyper-prior (i.e. the group-level distribution). 
- -But before we move forward, maybe it is worth clarifying some of the terminology we use, especially as, starting from now, many things are called **networks** but are pointing to different parts of the workflow. We can indeed distinguish two kinds: -1. The predictive coding neural networks. This is the kind of network that [pyhgf](https://github.com/ilabcode/pyhgf) is designed to handle (see {ref}`#probabilistic_networks`). Every HGF model is an instance of such a network. -2. The Bayesian (multilevel) network is the computational graph that is created with tools like [pymc](https://www.pymc.io/welcome.html). This graph will represent the dependencies between our variables and the way they are transformed. - -In this notebook, we are going to create the second type of network and incorporate many networks of the first type in it as custom distribution. - -+++ - -## Simulate a dataset -We start by simulating a dataset that would containt the decisions from a group of participants undergoing a standard one-armed bandit task. We use the same binary time series as reference than from the previous tutorials. This would represent the association between the stimuli and the outcome, this is controlled by the experimenter and here we assume all participants are presented with the same sequence of association. - -```{code-cell} ipython3 -u, _ = load_data("binary") -``` - -Using the same reasoning as in the previous tutorial {ref}`#custom_response_functions`, we simulate the trajectories of beliefs from participants being presented with this sequence of observation. Here, we vary one parameter in the perceptual model, we assume that the tonic volatility from the second level is sampled from a population distribution such as: - -$$ -\omega_{2_i} \sim \mathcal{N}(-4.0, 1.0) -$$ - -This produces belief trajectories that can be used to infer propensity for decision at each time point. 
Moreover, we will assume that the decision function incorporates the possibility of a bias in the link between the belief and the decision in the form of the inverse temperature parameter, such as: - -$$ -P(A|\mu, t) = \frac{\mu^t}{\mu^t + (1-\mu)^t} -$$ - -Where $A$ is a positive association between the stimulus and the outcome, $\mu = \hat{\mu}_1^{(k)}$, the expected probability from the first level and $t$ is the temperature parameter. - -```{code-cell} ipython3 -def sigmoid(x, temperature): - """The sigmoid response function with inverse temperature parameter.""" - return (x**temperature) / (x**temperature + (1-x)**temperature) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' -tags: [hide-input] ---- -x = np.linspace(0, 1, 500) -sns.set_palette("rocket") -for temp in [0.5, 1.0, 6.0, 64.0]: - plt.plot(x, sigmoid(x, temp), label=rf'$ \lambda = {temp}$'); -plt.title("The unit square sigmoid function") -plt.legend() -sns.despine(); -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -N = 10 # number of agents/participants in the study - -# create just one default network - we will simply change the values of interest before fitting to save time -agent = HGF( - n_levels=2, - verbose=False, - model_type="binary", - initial_mean={"1": 0.5, "2": 0.0}, -) -``` - -```{code-cell} ipython3 -# observations (always the same), simulated decisions, sample values for temperature and volatility -responses = [] -for i in range(N): - # sample one new value of the tonic volatility at the second level and fit to observations - volatility = np.random.normal(-4.0, 1.0) - agent.attributes[2]["tonic_volatility"] = volatility - agent.input_data(input_data=u) - - # sample one value for the inverse temperature (here in log space) and simulate responses - temperature = np.exp(np.random.normal(.5, .5)) - p = sigmoid(x=agent.node_trajectories[1]["expected_mean"], temperature=temperature) - - # store observations and decisions 
separately - responses.append(np.random.binomial(p=p, n=1)) -``` - -## A multilevel binary HGF -In this part, we start embedding the HGF in a multilevel model using PyMC. We use the same core distribution (the [HGFDistribution class](pyhgf.distribution.HGFDistribution)) and leverage the possibility of automatic broadcasting (see below) to apply the same procedure to multiple HGF models in parallel. Note that the list of observations and decisions from all participants is provided and stored when we create the [PyTensor](https://pytensor.readthedocs.io) Op instance so it is not necessary to provide it thereafter. - -+++ - -```{hint} Using automatic broadcasting -To estimate group-level parameters, we will have to fit multiple models at the same time, either on different input data, on the same data with different parameters or on different datasets with different parameters. This step is handled natively both by the [log probability function](pyhgf.distribution.hgf_logp) and the [HGFDistribution class](pyhgf.distribution.HGFDistribution) using a pseudo [broadcasting](https://numpy.org/doc/stable/user/basics.broadcasting.html) approach. When a list of *n* input time series is provided, the function will automatically apply *n* models using the provided parameters. If for some parameters an array of length *n* is provided, each model will use the n-th value as a parameter. Here, we are going to rely on this feature to compute the log probability of *n* model, using *n* time series as input and *n* different parameters to test. 
-``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -hgf_logp_op = HGFDistribution( - n_levels=2, - model_type="binary", - input_data=[u] * N, # the inputs are the same for all agents - just duplicate the array - response_function=binary_softmax_inverse_temperature, - response_function_inputs=responses, -) -``` - -```{code-cell} ipython3 -with pm.Model() as two_levels_binary_hgf: - - # tonic volatility - mu_volatility = pm.Normal("mu_volatility", -5, 5) - sigma_volatility = pm.HalfNormal("sigma_volatility", 10) - volatility = pm.Normal("volatility", mu=mu_volatility, sigma=sigma_volatility, shape=N) - - # inverse temperature - mu_temperature = pm.Normal("mu_temperature", 0, 2) - sigma_temperature = pm.HalfNormal("sigma_temperature", 2) - inverse_temperature = pm.LogNormal("inverse_temperature", mu=mu_temperature, sigma=sigma_temperature, shape=N) - - # The multi-HGF distribution - # -------------------------- - pm.Potential("hgf_loglike", - hgf_logp_op( - tonic_volatility_2=volatility, - response_function_parameters=inverse_temperature - ) - ) -``` - -## Plot the computational graph -The multilevel model includes hyperpriors over the mean and standard deviation of both the inverse temperature and the tonic volatility of the second level. - -```{note} -We are sampling the inverse temperature in log space to ensure it will always be higher than 0, while being able to use normal hyper-priors at the group level. 
-``` - -```{code-cell} ipython3 -pm.model_to_graphviz(two_levels_binary_hgf) -``` - -## Sampling - -```{code-cell} ipython3 -with two_levels_binary_hgf: - two_level_hgf_idata = pm.sample(chains=2, cores=1) -``` - -## Visualization of the posterior distributions - -```{code-cell} ipython3 -az.plot_posterior(two_level_hgf_idata, var_names=["mu_temperature", "mu_volatility"], ref_val=[.5, -4.0]) -plt.tight_layout() -``` - -The reference values on both posterior distributions indicate the mean of the distribution used for simulation. - -+++ - -# System configuration - -```{code-cell} ipython3 -%load_ext watermark -%watermark -n -u -v -iv -w -p pyhgf,jax,jaxlib -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- - -``` diff --git a/docs/source/notebooks/4-Parameter_recovery.ipynb b/docs/source/notebooks/4-Parameter_recovery.ipynb index 8127ca43c..8f8255863 100644 --- a/docs/source/notebooks/4-Parameter_recovery.ipynb +++ b/docs/source/notebooks/4-Parameter_recovery.ipynb @@ -99,7 +99,7 @@ "tags": [] }, "source": [ - "An important application of Hierarchical Gaussian Filters consists in the inference of computational parameters from observed behaviours, as well as the inference of data-generating models (e.g. are the participants answering randomly or are they learning environmental volatilities that are better approached with a Rescorla-Wagner or a Hierarchical Gaussian Filter?). **Parameter recovery** refers to the ability to recover true data-generating parameters; **model recovery** refers to the ability to correctly identify the true data-generating model using model comparison techniques. It is often a good idea to test parameter/model recovery of a computational model using simulated data before applying this model to experimental data {citep}`RobertCollins2019`. In this tutorial, we demonstrate how to recover some parameters of the generative model of the Hierarchical Gaussian Filter." 
+ "An important application of Hierarchical Gaussian Filters consists in the inference of computational parameters from observed behaviours, as well as the inference of data-generating models (e.g. are the participants answering randomly or are they learning environmental volatilities that are better approached with a Rescorla-Wagner or a Hierarchical Gaussian Filter?). **Parameter recovery** refers to the ability to recover true data-generating parameters; **model recovery** refers to the ability to correctly identify the true data-generating model using model comparison techniques. It is often a good idea to test parameter/model recovery of a computational model using simulated data before applying this model to experimental data {cite:p}`RobertCollins2019`. In this tutorial, we demonstrate how to recover some parameters of the generative model of the Hierarchical Gaussian Filter." ] }, { @@ -561,9 +561,6 @@ "interpreter": { "hash": "40d3a090f54c6569ab1632332b64b2c03c39dcf918b08424e98f38b5ae0af88f" }, - "jupytext": { - "formats": "ipynb,md:myst" - }, "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", diff --git a/docs/source/notebooks/4-Parameter_recovery.md b/docs/source/notebooks/4-Parameter_recovery.md deleted file mode 100644 index 8e067f03f..000000000 --- a/docs/source/notebooks/4-Parameter_recovery.md +++ /dev/null @@ -1,253 +0,0 @@ ---- -jupytext: - formats: ipynb,md:myst - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.16.1 -kernelspec: - display_name: Python 3 (ipykernel) - language: python - name: python3 ---- - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -(parameters_recovery)= -# Recovering computational parameters from observed behaviours - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -[![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ilabcode/pyhgf/blob/master/docs/source/notebooks/4-Parameter_recovery.ipynb) - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' -tags: [hide-cell] ---- -%%capture -import sys - -if 'google.colab' in sys.modules: - !pip install pyhgf watermark -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -import arviz as az -import jax.numpy as jnp -import matplotlib.pyplot as plt -import numpy as np -import pymc as pm -import seaborn as sns -from numpy import loadtxt - -from pyhgf import load_data -from pyhgf.distribution import HGFDistribution, hgf_logp -from pyhgf.model import HGF -from pyhgf.response import binary_softmax_inverse_temperature -``` - -```{code-cell} ipython3 -np.random.seed(123) -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -An important application of Hierarchical Gaussian Filters consists in the inference of computational parameters from observed behaviours, as well as the inference of data-generating models (e.g. are the participants answering randomly or are they learning environmental volatilities that are better approached with a Rescorla-Wagner or a Hierarchical Gaussian Filter?). **Parameter recovery** refers to the ability to recover true data-generating parameters; **model recovery** refers to the ability to correctly identify the true data-generating model using model comparison techniques. It is often a good idea to test parameter/model recovery of a computational model using simulated data before applying this model to experimental data {citep}`RobertCollins2019`. In this tutorial, we demonstrate how to recover some parameters of the generative model of the Hierarchical Gaussian Filter. 
- -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -## Simulate behaviours from a one-armed bandit task -Using a given task structure, we simulate behaviours from a group of participants assuming that they are updating beliefs of environmental volatility using a two-level Hierarchical Gaussian Filter, using a simple sigmoid as a response function parametrized by an inverse temperature parameter. For each participant, the inverse temperature and the tonic volatility at the second level are free parameters that will be estimated during the inference step. - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -u, _ = load_data("binary") # the vector encoding the presence/absence of association - -N = 20 # the number of agents to simulate - -# sample one value for the inverse temperature (here in log space) and simulate responses -temperatures = np.linspace(0.5, 6.0, num=N) - -# sample one new value of the tonic volatility at the second level and fit to observations -volatilities = np.linspace(-6.0, -1.0, num=N) -``` - -```{code-cell} ipython3 -def sigmoid(x, temperature): - """The sigmoid response function with an inverse temperature parameter.""" - return (x**temperature) / (x**temperature + (1-x)**temperature) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -# create just one default network - we will simply change the values of interest before fitting to save time -agent = HGF( - n_levels=2, - verbose=False, - model_type="binary", - initial_mean={"1": 0.5, "2": 0.0}, -) -``` - -```{code-cell} ipython3 -# observations (always the same), simulated decisions, sample values for temperature and volatility -responses = [] -for i in range(N): - # set the tonic volatility for this agent and run the perceptual model forward - agent.attributes[2]["tonic_volatility"] = volatilities[i] - agent.input_data(input_data=u) - - # get decision probabilities using the belief trajectories - # and the sigmoid 
decision function with inverse temperature - p = sigmoid( - x=agent.node_trajectories[1]["expected_mean"], temperature=temperatures[i] - ) - - # save the observations and decisions separately - responses.append(np.random.binomial(p=p, n=1)) -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -## Inference from the simulated behaviours - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -hgf_logp_op = HGFDistribution( - n_levels=2, - model_type="binary", - input_data=[u] * N, # the inputs are the same for all agents - just duplicate the array - response_function=binary_softmax_inverse_temperature, - response_function_inputs=responses, -) -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -Here, we are not assuming hyperpriors to ensure that individual estimates are independent and avoid hierarchical partial pooling. - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -with pm.Model() as two_levels_binary_hgf: - - # tonic volatility - volatility = pm.Normal.dist(-3.0, 5, shape=N) - censored_volatility = pm.Censored("censored_volatility", volatility, lower=-8, upper=2) - - # inverse temperature - inverse_temperature = pm.Uniform("inverse_temperature", .1, 80, shape=N, initval=np.ones(N)) - - # The multi-HGF distribution - # -------------------------- - pm.Potential( - "hgf_loglike", - hgf_logp_op( - tonic_volatility_2=censored_volatility, - response_function_parameters=inverse_temperature, - ), - ) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -with two_levels_binary_hgf: - two_level_hgf_idata = pm.sample(chains=2, cores=1) -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -## Visualizing parameters recovery - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -A successful parameter recovery is usually inferred from the scatterplot of simulated values and inferred values of the parameters. 
Here, we can see that the model can recover fairly accurate values close to the underlying parameters. Additionally, we can report the coefficient of correlation between the two variables, as a more objective measure of correspondence. - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' -tags: [hide-input] ---- -fig, axs = plt.subplots(figsize=(12, 5), ncols=2) - -axs[0].plot([-6.0, 0.0], [-6.0, 0.0], color="grey", linestyle="--", zorder=-1) -axs[1].plot([0.0, 7.0], [0.0, 7.0], color="grey", linestyle="--", zorder=-1) - -for var_name, refs, idx in zip( - ["censored_volatility", "inverse_temperature"], - [volatilities, temperatures], - [0, 1], -): - inferred_parameters = az.summary(two_level_hgf_idata, var_names=var_name)["mean"].tolist() - - sns.kdeplot( - x=refs, - y=inferred_parameters, - ax=axs[idx], - fill=True, - cmap="Reds" if var_name == "censored_volatility" else "Greens", - alpha=.4 - ) - - axs[idx].scatter( - refs, - az.summary(two_level_hgf_idata, var_names=var_name)["mean"].tolist(), - s=60, - alpha=.8, - edgecolors="k", - color= "#c44e52" if var_name == "censored_volatility" else "#55a868" - ) - - axs[idx].grid(True, linestyle='--') - axs[idx].set_xlabel("Simulated parameter") - axs[idx].set_ylabel("Infered parameter") - - -axs[0].set_title("Second level tonic volatility") -axs[1].set_title("Inverse temperature") -sns.despine() -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -# System configuration - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -%load_ext watermark -%watermark -n -u -v -iv -w -p pyhgf,jax,jaxlib -``` diff --git a/docs/source/notebooks/Example_1_Heart_rate_variability.ipynb b/docs/source/notebooks/Example_1_Heart_rate_variability.ipynb index 3b7f538fc..988d8b45c 100644 --- a/docs/source/notebooks/Example_1_Heart_rate_variability.ipynb +++ b/docs/source/notebooks/Example_1_Heart_rate_variability.ipynb @@ -498,9 +498,6 @@ "interpreter": { "hash": 
"40d3a090f54c6569ab1632332b64b2c03c39dcf918b08424e98f38b5ae0af88f" }, - "jupytext": { - "formats": "ipynb,md:myst" - }, "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", diff --git a/docs/source/notebooks/Example_1_Heart_rate_variability.md b/docs/source/notebooks/Example_1_Heart_rate_variability.md deleted file mode 100644 index 189cdedec..000000000 --- a/docs/source/notebooks/Example_1_Heart_rate_variability.md +++ /dev/null @@ -1,159 +0,0 @@ ---- -jupytext: - formats: ipynb,md:myst - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.16.1 -kernelspec: - display_name: Python 3 (ipykernel) - language: python - name: python3 ---- - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -(example_1)= -# Example 1: Bayesian filtering of cardiac volatility - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ilabcode/pyhgf/blob/master/docs/source/notebooks/Example_1_Heart_rate_variability.ipynb) - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' -tags: [hide-cell] ---- -%%capture -import sys -if 'google.colab' in sys.modules: - !pip install pyhgf systole watermark -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -import arviz as az -import matplotlib.pyplot as plt -import numpy as np -import pymc as pm -import seaborn as sns -from systole import import_dataset1 -from systole.detection import ecg_peaks -from systole.plots import plot_raw -from systole.utils import input_conversion - -from pyhgf.distribution import HGFDistribution -from pyhgf.model import HGF -from pyhgf.response import total_gaussian_surprise -``` - -The nodalized version of the Hierarchical Gaussian Filter that is implemented in [pyhgf](https://github.com/ilabcode/pyhgf) opens the possibility to create filters with multiple inputs. 
Here, we illustrate how we can use this feature to create an agent that is filtering their physiological signals in real-time. We use a two-level Hierarchical Gaussian Filter to predict the dynamics of the instantaneous heart rate (the RR interval measured at each heartbeat). We then extract the trajectory of surprise at each predictive node to relate it with the cognitive task performed by the participant while the signal is being recorded. - -+++ - -## Loading and preprocessing physiological recording -We use the physiological dataset included in [Systole](https://embodied-computation-group.github.io/systole/index.html) as an example. This recording contains electrocardiography (ECG) and respiration recording. - -```{code-cell} ipython3 -# Import PPG recording as pandas data frame -physio_df = import_dataset1(modalities=['ECG', 'Respiration']) - -# Only use the first 60 seconds for demonstration -ecg = physio_df.ecg -``` - -### Plot the signal with instantaneous heart rate derivations - -```{code-cell} ipython3 -plot_raw(ecg, modality='ecg', sfreq=1000, show_heart_rate=True); -``` - -### Preprocessing - -```{code-cell} ipython3 -# detect R peaks using Pan-Tomkins algorithm -_, peaks = ecg_peaks(physio_df.ecg) - -# convert the peaks into a RR time series -rr = input_conversion(x=peaks, input_type="peaks", output_type="rr_s") -``` - -## Model - -+++ - -```{note} -Here we use the total Gaussian surprise ({py:func}`pyhgf.response.total_gaussian_surprise`) as a response function. This response function deviates from the default behaviour for the continuous HGF in that it returns the sum of the surprise for all the probabilistic nodes in the network, whereas the default ({py:func}`pyhgf.response.first_level_gaussian_surprise`) only computes the surprise at the first level (i.e. the value parent of the continuous input node). 
We explicitly specify this parameter here to indicate that we want our model to minimise its prediction errors over all variables, and not only at the observation level. In this case, however, the results are expected to be very similar between the two methods. -``` - -```{code-cell} ipython3 -hgf_logp_op = HGFDistribution( - n_levels=2, - model_type="continuous", - input_data=[rr], - response_function=total_gaussian_surprise, -) -``` - -```{code-cell} ipython3 -with pm.Model() as three_level_hgf: - - # omegas priors - tonic_volatility_2 = pm.Normal("tonic_volatility_2", -2.0, 2.0) - - # HGF distribution - pm.Potential("hgf_loglike", hgf_logp_op(tonic_volatility_1=-4.0, tonic_volatility_2=tonic_volatility_2)) -``` - -```{code-cell} ipython3 -pm.model_to_graphviz(three_level_hgf) -``` - -```{code-cell} ipython3 -with three_level_hgf: - idata = pm.sample(chains=2) -``` - -```{code-cell} ipython3 -az.plot_trace(idata) -plt.tight_layout() -``` - -```{code-cell} ipython3 -# retrieve the best fir for omega_2 -tonic_volatility_2 = az.summary(idata)["mean"]["tonic_volatility_2"] -``` - -```{code-cell} ipython3 -hgf = HGF( - n_levels=2, - model_type="continuous", - initial_mean={"1": rr[0], "2": -4.0}, - initial_precision={"1": 1e4, "2": 1e1}, - tonic_volatility={"1": -4.0, "2": tonic_volatility_2}, - tonic_drift={"1": 0.0, "2": 0.0}, - volatility_coupling={"1": 1.0}).input_data(input_data=rr) -``` - -```{code-cell} ipython3 -hgf.plot_trajectories(); -``` - -# System configuration - -```{code-cell} ipython3 -%load_ext watermark -%watermark -n -u -v -iv -w -p pyhgf,jax,jaxlib -``` - -```{code-cell} ipython3 - -``` diff --git a/docs/source/notebooks/Example_2_Input_node_volatility_coupling.ipynb b/docs/source/notebooks/Example_2_Input_node_volatility_coupling.ipynb index 3ae1a118d..e46d48a7f 100644 --- a/docs/source/notebooks/Example_2_Input_node_volatility_coupling.ipynb +++ b/docs/source/notebooks/Example_2_Input_node_volatility_coupling.ipynb @@ -544,9 +544,6 @@ } ], 
"metadata": { - "jupytext": { - "formats": "ipynb,md:myst" - }, "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", @@ -562,7 +559,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.16" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/docs/source/notebooks/Example_2_Input_node_volatility_coupling.md b/docs/source/notebooks/Example_2_Input_node_volatility_coupling.md deleted file mode 100644 index 57783c465..000000000 --- a/docs/source/notebooks/Example_2_Input_node_volatility_coupling.md +++ /dev/null @@ -1,218 +0,0 @@ ---- -jupytext: - formats: ipynb,md:myst - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.15.1 -kernelspec: - display_name: Python 3 (ipykernel) - language: python - name: python3 ---- - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -(example_2)= -# Example 2: Estimating the mean and precision of an input node - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ilabcode/pyhgf/blob/master/docs/source/notebooks/Example_2_Input_node_volatility_coupling.ipynb) - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' -tags: [hide-cell] ---- -%%capture -import sys -if 'google.colab' in sys.modules: - !pip install pyhgf watermark -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -import arviz as az -import matplotlib.pyplot as plt -import numpy as np -import pymc as pm -import seaborn as sns -from scipy.stats import norm - -from pyhgf.distribution import HGFDistribution -from pyhgf.model import HGF -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -Where the standard continuous HGF assumes a known precision in the input node (usually set to something high), this assumption can be relaxed and the filter can 
also try to estimate this quantity from the data. In this notebook, we demonstrate how we can infer the value of the mean, of the precision, or both value at the same time, using the appropriate value and volatility coupling parents. - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -## Unkown mean, known precision - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -```{hint} -The {ref}`continuous_hgf` is an example of a model assuming a continuous input with known precision and unknown mean. It is further assumed that the mean is changing overtime, and we want the model to track this rate of change by adding a volatility node on the top of the value parent (two-level continuous HGF), and event track the rate of change of this rate of change by adding another volatility parent (three-level continuous HGF). -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -np.random.seed(123) -dist_mean, dist_std = 5, 1 -input_data = np.random.normal(loc=dist_mean, scale=dist_std, size=1000) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -mean_hgf = ( - HGF(model_type=None, update_type="standard") - .add_nodes(kind="continuous-input", node_parameters={'input_noise': 1.0, "expected_precision": 1.0}) - .add_nodes(value_children=0, node_parameters={"tonic_volatility": -8.0}) -).input_data(input_data) -mean_hgf.plot_network() -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -```{note} -We are setting the tonic volatility to something low for visualization purposes, but changing this value can make the model learn in fewer iterations. 
-``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' -tags: [hide-input] ---- -# get the nodes trajectories -df = mean_hgf.to_pandas() - -fig, ax = plt.subplots(figsize=(12, 5)) - -x = np.linspace(-10, 10, 1000) -for i, color in zip([0, 2, 5, 10, 50, 500], plt.cm.Greys(np.linspace(.2, 1, 6))): - - # extract the sufficient statistics from the input node (and parents) - mean = df.x_1_expected_mean.iloc[i] - std = np.sqrt( - 1/(mean_hgf.attributes[0]["expected_precision"]) - ) - - # the model expectations - ax.plot(x, norm(mean, std).pdf(x), color=color, label=i) - -# the sampling distribution -ax.fill_between(x, norm(dist_mean, dist_std).pdf(x), color="#582766", alpha=.2) - -ax.legend(title="Iterations") -ax.set_xlabel("Input (u)") -ax.set_ylabel("Density") -plt.grid(linestyle=":") -sns.despine() -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -## Kown mean, unknown precision - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -## Unkown mean, unknown precision - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -np.random.seed(123) -dist_mean, dist_std = 5, 1 -input_data = np.random.normal(loc=dist_mean, scale=dist_std, size=1000) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -mean_precision_hgf = ( - HGF(model_type=None) - .add_nodes(kind="continuous-input", node_parameters={'input_noise': 0.01, "expected_precision": 0.01}) - .add_nodes(value_children=0, node_parameters={"tonic_volatility": -6.0}) - .add_nodes(volatility_children=0, node_parameters={"tonic_volatility": -6.0}) -).input_data(input_data) -mean_precision_hgf.plot_network() -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' -tags: [hide-input] ---- -# get the nodes trajectories -df = mean_precision_hgf.to_pandas() - -fig, ax = plt.subplots(figsize=(12, 5)) - -x = np.linspace(-10, 10, 1000) -for i, color in zip(range(0, 150, 15), 
plt.cm.Greys(np.linspace(.2, 1, 10))): - - # extract the sufficient statistics from the input node (and parents) - mean = df.x_1_expected_mean.iloc[i] - std = np.sqrt( - 1/(mean_precision_hgf.attributes[0]["expected_precision"] * (1/np.exp(df.x_2_expected_mean.iloc[i]))) -) - - # the model expectations - ax.plot(x, norm(mean, std).pdf(x), color=color, label=i) - - -# the sampling distribution -ax.fill_between(x, norm(dist_mean, dist_std).pdf(x), color="#582766", alpha=.2) - -ax.legend(title="Iterations") -ax.set_xlabel("Input (u)") -ax.set_ylabel("Density") -plt.grid(linestyle=":") -sns.despine() -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -## System configuration - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -%load_ext watermark -%watermark -n -u -v -iv -w -p pyhgf,jax,jaxlib -``` - -```{code-cell} ipython3 - -``` diff --git a/docs/source/notebooks/Example_3_Multi_armed_bandit.ipynb b/docs/source/notebooks/Example_3_Multi_armed_bandit.ipynb index ab38b1ea3..4f5223dd1 100644 --- a/docs/source/notebooks/Example_3_Multi_armed_bandit.ipynb +++ b/docs/source/notebooks/Example_3_Multi_armed_bandit.ipynb @@ -1738,9 +1738,6 @@ } ], "metadata": { - "jupytext": { - "formats": "ipynb,md:myst" - }, "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", @@ -1756,7 +1753,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.16" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/docs/source/notebooks/Example_3_Multi_armed_bandit.md b/docs/source/notebooks/Example_3_Multi_armed_bandit.md deleted file mode 100644 index 79dd2c548..000000000 --- a/docs/source/notebooks/Example_3_Multi_armed_bandit.md +++ /dev/null @@ -1,621 +0,0 @@ ---- -jupytext: - formats: ipynb,md:myst - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.15.1 -kernelspec: - display_name: Python 3 (ipykernel) - language: python - 
name: python3 ---- - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -(example_3)= -# Example 3: A multi-armed bandit task with independent rewards and punishments - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ilabcode/pyhgf/blob/master/docs/source/notebooks/Example_2_Input_node_volatility_coupling.ipynb) - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' -tags: [hide-cell] ---- -%%capture -import sys -if 'google.colab' in sys.modules: - !pip install pyhgf watermark -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -import arviz as az -import matplotlib.pyplot as plt -import numpy as np -import pymc as pm -import seaborn as sns -from scipy.stats import norm -import jax.numpy as jnp -from pyhgf.math import binary_surprise -from jax.tree_util import Partial -from jax import jit -from pytensor.graph import Apply, Op -import pytensor.tensor as pt -from jax import grad, jit, vjp -from pyhgf.distribution import HGFDistribution -from pyhgf.model import HGF - -np.random.seed(123) -``` - -+++ {"editable": true, "jp-MarkdownHeadingCollapsed": true, "slideshow": {"slide_type": ""}} - -In this notebook, we are going to illustrate how to fit behavioural responses from a two-armed bandit task when the rewards and punishments are independent using a task similar to what was used in {cite:p}`Pulcu2017` (see {ref}`task`). This will also illustrate how we can use missing/unobserved values in an input node, the impact this has on the belief trajectories, and how to deal with models where the decisions of the agent influence the observations. 
- -```{figure} https://iiif.elifesciences.org/lax/27879%2Felife-27879-fig1-v3.tif/full/1500,/0/default.jpg ---- -name: task ---- -Experimental design - -In the task considered here, two armed bandits are presented to the participant on each trial, and the participant has to select one of them to get the reward and punishments associated. In our simulation, we generalize further and consider that for both arms, the rewards and punishments are independent, which means that the participant has to infer four probabilities: $\{P(reward|A), P(loss|A), P(reward|B), P(loss|B)\}$. Because the rewards and punishments are independent, we simulate the task using four binary HGFs. Figure from {cite:p}`Pulcu2017`. -``` - -```{note} -While the binary HGF is a special case of the categorical HGF where the number of categories is set to 2, the categorical HGF assumes that only one category is presented on every trial, which is different from what we have here as on some trials, we could have both reward and loss on both arms. Also, a categorical HGF adds a volatility coupling between the binary branch (see {ref}`categorical_hgf`). Therefore, this model would not be suitable here as we want every branch of the network to evolve independently. -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -# the tonic volatility used across the tutorial -# this value is the same for four branches of the network -tonic_volatility = -1.5 -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -We start by creating a network that consists in four two-levels HGF. Each branch is evolving independently and is not affecting the beliefs trajectories of other branches. 
- -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -two_armed_bandit_hgf = ( - HGF(model_type=None) - .add_nodes(kind="binary-input", n_nodes=4) - .add_nodes(kind="binary-state", value_children=0) - .add_nodes(kind="binary-state", value_children=1) - .add_nodes(kind="binary-state", value_children=2) - .add_nodes(kind="binary-state", value_children=3) - .add_nodes(value_children=4, node_parameters={"tonic_volatility": tonic_volatility}) - .add_nodes(value_children=5, node_parameters={"tonic_volatility": tonic_volatility}) - .add_nodes(value_children=6, node_parameters={"tonic_volatility": tonic_volatility}) - .add_nodes(value_children=7, node_parameters={"tonic_volatility": tonic_volatility}) -) -two_armed_bandit_hgf.plot_network() -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -## Task structure -We start with a simple task structure where contingencies (i.e. the probability that a given armed bandit is associated with a win/loss) alternate between `0.2`, `0.5` and `0.8`. The rate of change in contingencies can be fast (i.e. high volatility blocks) or slow (i.e. low volatility blocks). 
- -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -# three levels of contingencies -high_prob, chance, low_prob = 0.8, 0.5, 0.2 - -# create blocks of contingencies -stable_contingencies = np.array([low_prob, high_prob]).repeat(40) -volatile_contingencies = np.tile(np.array([low_prob, high_prob]).repeat(10), 6) -chance_contingencies = np.array(chance).repeat(80) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -# create sequences of blocks for each arm/rewards -win_arm1 = np.concatenate([stable_contingencies, chance_contingencies, volatile_contingencies]) -loss_arm1 = np.concatenate([volatile_contingencies, chance_contingencies, stable_contingencies]) -win_arm2 = np.concatenate([chance_contingencies, stable_contingencies, volatile_contingencies]) -loss_arm2 = np.concatenate([chance_contingencies, volatile_contingencies, stable_contingencies]) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -# sample trial level outcomes from the sequences -u_win_arm1 = np.random.binomial(n=1, p=win_arm1) -u_loss_arm1 = np.random.binomial(n=1, p=loss_arm1) -u_win_arm2 = np.random.binomial(n=1, p=win_arm2) -u_loss_arm2 = np.random.binomial(n=1, p=loss_arm2) -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -This gives the following task structure: - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' -tags: [hide-input, full-width] ---- -# trial numbers -trials = np.arange(len(win_arm1)) - -_, axs = plt.subplots(figsize=(12, 6), nrows=4, sharex=True, sharey=True) - -for i, u, p, label, color in zip( - range(4), - [u_win_arm1, u_loss_arm1, u_win_arm2, u_loss_arm2], - [win_arm1, loss_arm1, win_arm2, loss_arm2], - ["P(Reward|A)", "P(Loss|A)", "P(Reward|B)", "P(Loss|B)"], - ["seagreen", "firebrick", "seagreen", "firebrick"] -): - axs[i].scatter(trials, u, label="outcomes", alpha=.6, s=10, color="gray", edgecolor="k") - axs[i].plot(trials, p, 
"--", color=color) - axs[i].fill_between(trials, p, "--", label=label, color=color, alpha=.05) - axs[i].legend(loc='upper right') - axs[i].set_ylabel(label) -plt.tight_layout() -sns.despine(); -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -## Simulate a dataset - -We can simulate a vector of observation. This is a two-dimensional matrix with input observations for the four components of our model. - -```{code-cell} ipython3 -u = np.array([u_win_arm1, u_loss_arm1, u_win_arm2, u_loss_arm2]) -``` - -From there, it is straightforward to feed these observations to our four branches to get an estimate of the trajectories. - -```{note} -Here, we are providing all the outcomes from all trials. This is not how the task would work as the participant will only be presented with the outcomes from the armed bandit chosen, and we are not using responses from the participant yet. See also {ref}`custom_response_functions` for details on the observations `u` and the responses `y`. -``` - -```{code-cell} ipython3 -two_armed_bandit_hgf.input_data(input_data=u.T); -``` - -```{code-cell} ipython3 -_, axs = plt.subplots(figsize=(12, 6), nrows=4, sharex=True, sharey=True) - -two_armed_bandit_hgf.plot_nodes(node_idxs=4, axs=axs[0]) -two_armed_bandit_hgf.plot_nodes(node_idxs=5, axs=axs[1]) -two_armed_bandit_hgf.plot_nodes(node_idxs=6, axs=axs[2]) -two_armed_bandit_hgf.plot_nodes(node_idxs=7, axs=axs[3]) - -for i, p, label, color in zip( - range(4), - [win_arm1, loss_arm1, win_arm2, loss_arm2], - ["P(Reward|A)", "P(Loss|A)", "P(Reward|B)", "P(Loss|B)"], - ["seagreen", "firebrick", "seagreen", "firebrick"] -): - axs[i].plot(trials, p, "--", label=label, color=color) - axs[i].legend(loc='upper right') - -plt.tight_layout() -sns.despine(); -``` - -## Simulate responses from a participant -Using the beliefs trajectories recovered from the model fits above, we can simulate responses from a participant that would use the same parametrisation of beliefs update (especially the 
same `tonic_volatility` here). - -+++ - -### Decision rule - -The probability of chosing the arm $A$ given the expected probabilities of wining on both arms ${W_a; W_b}$ and the expected probabilities of loosing on both arms ${L_a; L_b}$, is given by the following softmax decision function: - -$$ -p(A) = \frac{e^{\beta(W_a-L_a)}}{e^{\beta(W_a-L_a)} + e^{\beta(W_b-L_b)}} -$$ - -where $\beta$ is the inverse temperature parameter. - -```{code-cell} ipython3 -beta = 1.0 # temperature parameter -w_a = two_armed_bandit_hgf.node_trajectories[4]["expected_mean"] -l_a = two_armed_bandit_hgf.node_trajectories[5]["expected_mean"] -w_b = two_armed_bandit_hgf.node_trajectories[6]["expected_mean"] -l_b = two_armed_bandit_hgf.node_trajectories[7]["expected_mean"] - -p_a = np.exp(beta * (w_a-l_a)) / ( np.exp(beta * (w_a-l_a)) + np.exp(beta * (w_b-l_b))) -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -Using these probabilities, we can infer which arm was selected at each trial and filter the inputs that are presented to the participant. Because it would be too chaotic to provide the information about the four hidden states at each trial, here the participant is only presented with the information about the arm that was selected. Therefore, when arm $A$ is selected, the inputs from arm $B$ are hidden. - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -```{note} -Input nodes can receive missing or unobserved values. Missing inputs are used to indicate an absence of observation from the agent's point of view and should not be used for missing records or excluded trials. When an input is labelled as missing, we use the total volatility at the parents' level to decrease their precision as a function of time elapsed, but the mean of the belief is still the same. This behaviour accounts for the fact that the Gaussian Random Walk continues while no observations are made and the uncertainty increases accordingly. 
It could be assumed that there is also a limit to this behaviour and that the uncertainty will not increase infinitely. AR1 nodes are good candidates as volatility parents to implement this behaviour. -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -# a new matrix of observations -missing_inputs_u = u.astype(float) - -# create a mask to filter the observations using the agent's decisions -observed = np.ones(u.shape) -observed[:2, p_a<=.5] = 0 -observed[2:, p_a>.5] = 0 -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -two_armed_bandit_missing_inputs_hgf = ( - HGF(model_type=None) - .add_nodes(kind="binary-input", n_nodes=4) - .add_nodes(kind="binary-state", value_children=0) - .add_nodes(kind="binary-state", value_children=1) - .add_nodes(kind="binary-state", value_children=2) - .add_nodes(kind="binary-state", value_children=3) - .add_nodes(value_children=4, node_parameters={"tonic_volatility": tonic_volatility}) - .add_nodes(value_children=5, node_parameters={"tonic_volatility": tonic_volatility}) - .add_nodes(value_children=6, node_parameters={"tonic_volatility": tonic_volatility}) - .add_nodes(value_children=7, node_parameters={"tonic_volatility": tonic_volatility}) -) -two_armed_bandit_missing_inputs_hgf.plot_network() -``` - -```{code-cell} ipython3 -# note that we are providing the mask as parameter of the input function -two_armed_bandit_missing_inputs_hgf.input_data( - input_data=missing_inputs_u.T, - observed=observed.T, -); -``` - -```{code-cell} ipython3 -_, axs = plt.subplots(figsize=(12, 6), nrows=4, sharex=True, sharey=True) - -two_armed_bandit_missing_inputs_hgf.plot_nodes(node_idxs=4, axs=axs[0]) -two_armed_bandit_missing_inputs_hgf.plot_nodes(node_idxs=5, axs=axs[1]) -two_armed_bandit_missing_inputs_hgf.plot_nodes(node_idxs=6, axs=axs[2]) -two_armed_bandit_missing_inputs_hgf.plot_nodes(node_idxs=7, axs=axs[3]) - -plt.tight_layout() -sns.despine(); -``` - -+++ {"editable": 
true, "slideshow": {"slide_type": ""}} - -We can now see from the plot above that the branches of the networks are only updated if the participant actually chose the corresponding arm. Otherwise, the expected probability remains the same but the uncertainty will increase over time. - -+++ - -## Parameter recovery - -Now that we have set a model with the required number of branches, and allowed this model to observe missing inputs, we can simulate responses from a participant and try to recover the parameters of interest (here focusing on the tonic volatility at the second level). It should be noted here that this kind of model implies that the decisions of the agent directly influence the observations, as only the outcomes from the selected armed bandit are presented to the participant. This implies that - in the context of simulations - we cannot iterate over the whole batch of observations before estimating the decision function, and should instead compute every step sequentially in a for loop. This is an important difference as most of the models discussed in the other tutorials filter all the observations first, and then compute the response costs, as the two processes are not influencing each other. - -### Real-time decision and belief updating - -We can implement this process using the {py:func}`pyhgf.networks.beliefs_propagation` function. In other models, this step is called sequentially in a {py:func}`jax.lax.scan` loop, but we can also use it for a single-step update. 
- -```{code-cell} ipython3 -from pyhgf.networks import beliefs_propagation -``` - -```{code-cell} ipython3 -two_armed_bandit_missing_inputs_hgf = ( - HGF(model_type=None, verbose=False) - .add_nodes(kind="binary-input", n_nodes=4) - .add_nodes(kind="binary-state", value_children=0) - .add_nodes(kind="binary-state", value_children=1) - .add_nodes(kind="binary-state", value_children=2) - .add_nodes(kind="binary-state", value_children=3) - .add_nodes(value_children=4, node_parameters={"tonic_volatility": tonic_volatility}) - .add_nodes(value_children=5, node_parameters={"tonic_volatility": tonic_volatility}) - .add_nodes(value_children=6, node_parameters={"tonic_volatility": tonic_volatility}) - .add_nodes(value_children=7, node_parameters={"tonic_volatility": tonic_volatility}) -).create_belief_propagation_fn() -``` - -```{code-cell} ipython3 -two_armed_bandit_missing_inputs_hgf.plot_network() -``` - -```{code-cell} ipython3 -# get the network variables from the HGF class -attributes = two_armed_bandit_missing_inputs_hgf.attributes -update_sequence = two_armed_bandit_missing_inputs_hgf.update_sequence -edges = two_armed_bandit_missing_inputs_hgf.edges -input_nodes_idx = two_armed_bandit_missing_inputs_hgf.input_nodes_idx.idx -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -input_data = u.astype(float).T -responses = [] # 1: arm A - 0: arm B - -# for each observation -for i in range(input_data.shape[0]): - - # the observation mask - default to 1 - observed = np.ones(4) - - # the time elapsed between two trials - defaults to 1 - time_steps = np.ones(1) - - # the expectations about the outcomes - w_a = attributes[4]["expected_mean"] - l_a = attributes[5]["expected_mean"] - w_b = attributes[6]["expected_mean"] - l_b = attributes[7]["expected_mean"] - - # the decision function - p_a = np.exp(beta * (w_a-l_a)) / ( np.exp(beta * (w_a-l_a)) + np.exp(beta * (w_b-l_b))) - - # sample a decision using the probability - response = 
np.random.binomial(p=p_a, n=1) - responses.append(response) - - # hide the observations that were not selected - if response: - observed[2:] = 0 - else: - observed[:2] = 0 - - # update the probabilistic network - attributes, _ = beliefs_propagation( - attributes=attributes, - input_data=(input_data[i], time_steps, observed), - update_sequence=update_sequence, - edges=edges, - input_nodes_idx=input_nodes_idx, - ) -responses = jnp.asarray(responses) # vector of responses -``` - -### Bayesian inference - -Using the responses simulated above, we can try to recover the value of the tonic volatility. First, we start by creating the response function we want to optimize (see also {ref}`custom_response_functions` on how to create such functions). - -```{code-cell} ipython3 -# create a mask to hide the observations from the arm that was not selected by the participant -observed = np.ones(input_data.shape) -observed[2:, responses] = 0 -observed[:2, ~responses] = 0 -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -def two_bandits_logp(tonic_volatility, hgf, input_data, responses, observed): - - # replace with a new omega in the model - hgf.attributes[8]["tonic_volatility"] = tonic_volatility - hgf.attributes[9]["tonic_volatility"] = tonic_volatility - hgf.attributes[10]["tonic_volatility"] = tonic_volatility - hgf.attributes[11]["tonic_volatility"] = tonic_volatility - - # run the model forward - hgf.input_data(input_data=input_data, observed=observed) - - # probabilities of choosing arm A - beta = 1.0 - w_a = hgf.node_trajectories[4]["expected_mean"] - l_a = hgf.node_trajectories[5]["expected_mean"] - w_b = hgf.node_trajectories[6]["expected_mean"] - l_b = hgf.node_trajectories[7]["expected_mean"] - p_a = jnp.exp(beta * (w_a-l_a)) / ( jnp.exp(beta * (w_a-l_a)) + jnp.exp(beta * (w_b-l_b))) - - # binary surprise given this probability - surprise = jnp.sum(binary_surprise(responses, p_a)) - - # the surprise of the model is the sum of binary 
surprise at all input level - # plus the binary surprise for the agent decision - surprise += jnp.sum(hgf.node_trajectories[0]["surprise"]) - surprise += jnp.sum(hgf.node_trajectories[1]["surprise"]) - surprise += jnp.sum(hgf.node_trajectories[2]["surprise"]) - surprise += jnp.sum(hgf.node_trajectories[3]["surprise"]) - - surprise = jnp.where( - jnp.isnan(surprise), - jnp.inf, - surprise - ) - return -surprise -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -```{hint} -The response function computes the sum of the binary surprise (see {py:func}`pyhgf.math.binary_surprise`) at the first level of the four binary HGFs, and adds the binary surprise for the observed decision given the probabilities predicted by the sigmoid decision rule. While we are mostly interested in predicting the latter (and could use solely this quantity for optimization), this can produce situations where the binary HGFs are forced toward *extreme* beliefs in a way that overfit the responses from the participant. Adding their surprise in the equation ensures that these situations are limited, as such trajectories are also associated with extreme surprise. As always, we return the negative surprise to get a log probability that can be manipulated by PyMC. 
-``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -We create both a jitted version of this function and the vector-jacobian product (gradient) requiered for a custom Op in PyTensor: - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -logp_fn = Partial( - two_bandits_logp, - hgf=two_armed_bandit_missing_inputs_hgf, - input_data=input_data, - responses=responses, - observed=observed -) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -jitted_custom_op_jax = jit(logp_fn) - -def vjp_custom_op_jax(x, gz): - _, vjp_fn = vjp(logp_fn, x) - return vjp_fn(gz)[0] - -jitted_vjp_custom_op_jax = jit(vjp_custom_op_jax) -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -The log probability function and the gradient of this function are then passed to a new `Op`, which is a class that can be manipulated inside PyMC graphs. - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' -tags: [hide-cell] ---- -# The CustomOp needs `make_node`, `perform` and `grad`. -class CustomOp(Op): - def make_node(self, x): - # Create a PyTensor node specifying the number and type of inputs and outputs - - # We convert the input into a PyTensor tensor variable - inputs = [pt.as_tensor_variable(x)] - # Output has the same type and shape as `x` - outputs = [inputs[0].type()] - return Apply(self, inputs, outputs) - - def perform(self, node, inputs, outputs): - # Evaluate the Op result for a specific numerical input - - # The inputs are always wrapped in a list - (x,) = inputs - result = jitted_custom_op_jax(x) - # The results should be assigned in place to the nested list - # of outputs provided by PyTensor. 
If you have multiple - # outputs and results, you should assign each at outputs[i][0] - outputs[0][0] = np.asarray(result, dtype="float64") - - def grad(self, inputs, output_gradients): - # Create a PyTensor expression of the gradient - (x,) = inputs - (gz,) = output_gradients - # We reference the VJP Op created below, which encapsulates - # the gradient operation - return [vjp_custom_op(x, gz)] - - -class VJPCustomOp(Op): - def make_node(self, x, gz): - # Make sure the two inputs are tensor variables - inputs = [pt.as_tensor_variable(x), pt.as_tensor_variable(gz)] - # Output has the shape type and shape as the first input - outputs = [inputs[0].type()] - return Apply(self, inputs, outputs) - - def perform(self, node, inputs, outputs): - (x, gz) = inputs - result = jitted_vjp_custom_op_jax(x, gz) - outputs[0][0] = np.asarray(result, dtype="float64") - -# Instantiate the Ops -custom_op = CustomOp() -vjp_custom_op = VJPCustomOp() -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -We are now ready to sample the model and estimate the value of tonic volatility. 
- -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -with pm.Model() as model: - omega = pm.Normal("omega", -4.0, 5) - pm.Potential("hgf", custom_op(omega)) - idata = pm.sample(chains=4, cores=1) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -_, ax = plt.subplots(figsize=(8, 4)) -az.plot_posterior(idata, ax=ax, kind='hist') -ax.axvline(tonic_volatility, color="r", label="Tonic volatility") -plt.legend(); -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -## System configuration - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -%load_ext watermark -%watermark -n -u -v -iv -w -p pyhgf,jax,jaxlib -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- - -``` diff --git a/docs/source/notebooks/Exercise_1_Using_the_HGF.ipynb b/docs/source/notebooks/Exercise_1_Using_the_HGF.ipynb index 029fa21de..c050c08da 100644 --- a/docs/source/notebooks/Exercise_1_Using_the_HGF.ipynb +++ b/docs/source/notebooks/Exercise_1_Using_the_HGF.ipynb @@ -2752,9 +2752,6 @@ } ], "metadata": { - "jupytext": { - "formats": "ipynb,md:myst" - }, "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", @@ -2770,7 +2767,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.16" + "version": "3.12.3" } }, "nbformat": 4, diff --git a/docs/source/notebooks/Exercise_1_Using_the_HGF.md b/docs/source/notebooks/Exercise_1_Using_the_HGF.md deleted file mode 100644 index eef5aba4a..000000000 --- a/docs/source/notebooks/Exercise_1_Using_the_HGF.md +++ /dev/null @@ -1,830 +0,0 @@ ---- -jupytext: - formats: ipynb,md:myst - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.15.1 -kernelspec: - display_name: Python 3 (ipykernel) - language: python - name: python3 ---- - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -(hgf_exercises)= 
-# An introduction to Hierarchical Gaussian Filters through practical exercises - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/ilabcode/pyhgf/blob/master/docs/source/notebooks/Exercise_1_Using_the_HGF.ipynb) - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' -tags: [hide-cell] ---- -%%capture -import sys -if 'google.colab' in sys.modules: - !pip install pyhgf watermark -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' -tags: [hide-cell] ---- -import arviz as az -import jax.numpy as jnp -import matplotlib.pyplot as plt -import numpy as np -import pandas as pd -import pymc as pm -import pytensor.tensor as pt -import seaborn as sns -from pytensor import function, scan - -from pyhgf import load_data -from pyhgf.distribution import HGFDistribution -from pyhgf.model import HGF -from pyhgf.response import binary_softmax, first_level_gaussian_surprise -``` - -```{code-cell} ipython3 -# load an example time series for continuous inputs -timeseries = load_data("continuous") -``` - -In this tutorial, we introduce the continuous and binary Hierarchical Gaussian Filters and describe their application in computational psychiatry research. - -We start by reviewing the core principles on which the HGF is built: a generative model of embedded stochastic processes to describe hidden states of the world. In the first part, we review the mathematical description of such operations and how to implement them in Python. - -In the second part, we apply this model to real-world data (weather dataset) by creating an agent that *uses* this model to filter sensory information and update internal beliefs about hidden states of the world. We then try to apply Bayesian inference over some of the agent's parameters.
- -In the third part, we introduce the binary Hierarchical Gaussian filter and consider experimental designs familiar in reinforcement learning, where the agent tries to learn the association between stimuli, or the occurrence of binary events. Then again, we apply Bayesian inference and try to compare the performance of our model with alternative explanations of an agent's behaviours. - -+++ - -## Belief updating under uncertainty: the continuous Hierarchical Gaussian Filter -### Gaussian random walks - -Hierarchical Gaussian Filters are built on a generalisation of the [Gaussian Random Walk](https://en.wikipedia.org/wiki/Random_walk#Gaussian_random_walk) (GRW). A GRW is a [stochastic process](https://en.wikipedia.org/wiki/Stochastic_process) that generates a new observation $x_1^{(k)}$ at each time step $k$ from a normal distribution and using the previous observation $x_1^{(k-1)}$ as its mean such as: - -$$ -x_1^{(k)} \sim \mathcal{N}(x_1^{(k-1)}, \sigma^2) -$$ - -where $\sigma^2$ is the fixed variance of the distribution. - -```{admonition} Exercise 1 -Using the equation above, write a Python code that implements a Gaussian random walk using the following parameters: $\sigma^2 = 1$ and $x_1^{(0)} = 0$. -``` - -+++ - -### Value and volatility coupling between probabilistic nodes - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -We have simulated above a simple GRW. At each time point, this process is fully described by the probability distribution and the sufficient statistics of this probability distribution (the mean and the variance). Using these values, we can also derive expected values (using the current mean) and expected precision (using the current variance). - -The HGF hierarchically generalizes this process by making the parameters of a stochastic process depend on another GRW at a different level.
In [PyHGF](https://github.com/ilabcode/pyhgf) we use a *nodalized* version of this framework {cite:p}`weber:2023`, and consider that each stochastic process is a node in a network, connected with other nodes through probabilistic dependencies: **value coupling** (targeting the value $\mu$ of the child node) or **volatility coupling** (targeting the volatility $\sigma^2$ of the child node). - -Let's consider for example a network constituted of two nodes $x_1$ and $x_2$, as it is found in the continuous HGF {cite:p}`2014:mathys`. The node $x_1$ is performing a GRW as previously described. We can add a dependency on the mean of the distribution (**value coupling**) by assuming that $x_1$ inherits the difference step from $x_2$, instead of using only its own previous value. Mathematically, this would write: - -$$ -x_2^{(k)} \sim \mathcal{N}(x_2^{(k-1)}, \, \sigma_2^2) \\ -x_1^{(k)} \sim \mathcal{N}(x_1^{(k-1)} + \alpha_{1} x_2^{(k)}, \, \sigma_1^2) \\ -$$ - -Note that this generative process reads top-down: the node higher in the hierarchy ($x_2$) generates new values and passes them to the child nodes. - -We can also arrange things differently, for example assuming that $x_1$ runs the GRW as usual, but this time it is paired with $x_2$ via **volatility coupling**. This means that for state $x_1$, the mean of the Gaussian random walk on time point $k$ is given by its previous value $x_1^{(k-1)}$, while the step size (or variance) depends on the current value of the higher level state, $x_2^{(k)}$.
- -$$ -x_1^{(k)} \sim \mathcal{N}(x_1^{(k-1)}, \, f(x_2^{(k)})) -$$ - -where the exact dependency is of the form - -$$ - f(x_2^{(k)}) = \exp(x_2^{(k)} + \omega_1) -$$ - -At the higher level of the hierarchy (here the second level), the nodes are not inheriting anything from their parents anymore, and only rely on their own variance: - -$$ -x_2^{(k)} \sim \mathcal{N}(x_2^{(k-1)}, \, \exp(\omega_2)) -$$ - -```{hint} -Here the volatility is not simply inherited from the higher node, it is mixed with another quantity ($\omega_2$). This is because we don't want the parent node to explain all the variance alone, the child node ($x_1$) also has a parameter for its own variance and expects some variability by itself. The parent node can weigh on this by adding or removing variance in the final quantity. $\omega$ is sometimes refered to as the *tonic* part of the variance, or the *evolution rate*, while $x_2$ is the *phasic* part of the variance. -``` - -```{admonition} Exercise 2 -- Using the equation above and your previous implementation, write a Python code that implements a hierarchical Gaussian random walk with the following parameters: $\omega_1 = -6.0$, $\omega_2 = -6.0$, $\mu_1 = 0.0$, $\mu_2 = -2.0$, $x_{1} = 0.0$ and $x_{2} = -2.0$ - -- What happens when we change the values of $\omega_1$? - -- What happens when we change the values of $\omega_2$? -``` - -+++ - -### The continuous Hierarchical Gaussian Filter - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -Hierarchical Filters are built on the notion that we can embed stochastic nodes and make them depend on each other and use this as a generative model of hidden states of the world. - -We therefore want to create agents that can use this principle to filter the sensory inputs they receive. But this time we have to think the other way. We do not want to generate data top-down (as in the function you wrote above), we already have the data in the form of sensory inputs. 
We want to provide this sensory input to the model and update the probabilistic nodes accordingly so that they continue to predict the next sensory input reasonably well. - -This requires propagating updates on sufficient statistics and sending precision-weighted prediction errors to the parent nodes. The pyhgf package implements this process with the corresponding update equation so the model can take as input a time series and infer the more likely generative structure that created the values. This can be extremely useful if you want to work with time series that have varying levels of volatility (i.e. meta-volatility). In the following example, we illustrate how we can use the Hierarchical Gaussian Filter to filter and predict inputs in a continuous node. - -```{code-cell} ipython3 -# create a two-levels continuous HGF -two_levels_continuous_hgf = HGF( - n_levels=2, - model_type="continuous", - initial_mean={"1": 1.04, "2": 0.0}, - initial_precision={"1": 1e4, "2": 1.0}, - tonic_volatility={"1": -6.0, "2": -2.0}, -) -``` - -```{code-cell} ipython3 -# plot the implied probabilistic network -two_levels_continuous_hgf.plot_network() -``` - -```{code-cell} ipython3 -# add new observations -two_levels_continuous_hgf = two_levels_continuous_hgf.input_data(input_data=timeseries) -``` - -```{code-cell} ipython3 -# plot the trajectories of the model beliefs -two_levels_continuous_hgf.plot_trajectories(); -``` - -```{code-cell} ipython3 -# return the sum of surprise at the input node -two_levels_continuous_hgf.surprise() -``` - -| parameter | description | -| --- | --- | -| $\mu_i$ | The mean of the distribution in node $i$. | -| $\pi_i$ | The precision of the distribution in node $i$. the precision is the inverse of the variance ($\frac{1}{\sigma^2}$) | -| $\omega_i$ | The evolution rate, or the tonic part of the variance of the distribution in node $i$. 
| - -+++ - -```{admonition} Exercise 3 -$\omega$ represents the tonic part of the variance (the part that is not affected by the parent node). Using the code example above, create another model with different values for $\omega$ at the second level. What is the consequence of changing this value on the belief trajectories? What is the "best" model in this context? -``` - -+++ - -### Parameters optimization - -+++ - -So far we have been running the HGF forward by fixing the values of the parameters beforehand. This is an important part of the modelling process as we can retrieve the belief trajectories, which indicates how the agent might use sensory information to adapt to the environment, as well as the surprise associated with these observations, which indicates *how well* the agent adapted to the environment. - -In the context of analysing data from a task, we do not want to fit the data manually and retrieve the trajectories from every possible set of parameters. Instead, we want to perform Bayesian inference over these parameters and estimate the posterior of the probability distribution. Here we are going to perform this step using Hamiltonian Monte Carlo sampling as implemented in PyMC. 
- -```{code-cell} ipython3 -hgf_logp_op = HGFDistribution( - n_levels=2, - input_data=[timeseries], - response_function=first_level_gaussian_surprise -) -``` - -```{code-cell} ipython3 -with pm.Model() as two_level_hgf: - - # tonic volatility priors - tonic_volatility_1 = pm.Uniform("tonic_volatility_1", -20, -2.0) - - # HGF distribution - pm.Potential("hgf_loglike", hgf_logp_op( - tonic_volatility_1=tonic_volatility_1, - tonic_volatility_2=-2.0, mean_1=1.0 - ) - ) -``` - -```{code-cell} ipython3 -pm.model_to_graphviz(two_level_hgf) -``` - -```{code-cell} ipython3 -with two_level_hgf: - idata = pm.sample(chains=4) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -az.plot_trace(idata); -``` - -```{code-cell} ipython3 -az.summary(idata) -``` - -### Practice: Filtering the worlds weather - -+++ - -In the previous section, we introduced the computational concept behind the Hierarchical Gaussian Filter and illustrated: - -1. How to create Gaussian Random Walks with hierarchical dependencies. -1. How to fit the HGF to a time series with continuous inputs. -2. How to find the posterior distribution over some parameters given sensory data and a response function. - -For the time left before the break, you are going to apply this knowledge to a more practical context: filtering the world's weather. We will use data from {cite:p}`pfenninger:2016, staffell:2016` that is made available at [the following database](https://renewables.ninja/). This database contains hourly recordings of various weather parameters that have been tracked over one year at different positions in the world. 
The data from Aarhus can be loaded using the following function call: - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -aarhus_weather_df = pd.read_csv("https://raw.githubusercontent.com/ilabcode/hgf-data/main/datasets/weather.csv") -aarhus_weather_df.head() -``` - -The data frame contains the following parameters, recorded every hour over the year of 2019: - -| parameter | description | -| --- | --- | -| t2m | The 2-meter above ground level air temperature | -| prectotland | The rain precipitation rate (mm/hour) | -| precsnoland | Snow precipitation rate (mm/hour) | -| snomas | Total snow storage land (kg/m2) | -| rhoa | Air density at surface (kg/m3) | -| swgdn | Surface incoming shortwave flux (W/m2) (considering cloud cover) (The value at the surface is approximately 1000 W/m2 on a clear day at solar noon in the summer months) | -| swtdn | Toa (top of atmosphere) incoming shortwave flux (W/m2) | -| cldtot | Total cloud area fraction. An average over grid cells and summed over all height above ground ([0,1] scale where 0 is no cloud and 1 is very cloudy) | - -```{code-cell} ipython3 -# Load time series example data -timeserie = aarhus_weather_df["t2m"][:24*30].to_numpy() - -# This is where we define all the model parameters - You can control the value of -# different variables at different levels using the corresponding dictionary. -hgf = HGF( - n_levels=2, - model_type="continuous", - initial_mean={"1": timeserie[0], "2": .5}, - initial_precision={"1": 1e4, "2": 1e1}, - tonic_volatility={"1":-6.0, "2": -3.0}, -) - -# add new observations -hgf.input_data(input_data=timeserie) - -# visualization of the belief trajectories -hgf.plot_trajectories(); -``` - -```{admonition} Exercises 4 -- Select a city and download a recording OR use the data frame loaded above. -- Fit an agent using one of the variables and compute the posterior probability over this parameter. 
-``` - -+++ - -## Bayesian reinforcement learning: the binary HGF - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -In the first part of the tutorial, we introduced the continuous Hierarchical Gaussian Filter and detailed how it is built on top of hierarchical GRW. This model is intended to work with continuous input variables. In this regard, it can be seen as a generalization of the Kalman filter. However, several experiments require working with variables that have discrete states. - -The binary HGF can be seen as an extension of the continuous HGF, with the exception that it has a binary input node instead of a continuous one. Handling such binary variables can be useful, for example for reinforcement learning paradigms where we want the agent to learn the association between two states in the world. Such experiments, like the [one-armed bandit task](https://en.wikipedia.org/wiki/Multi-armed_bandit) that we will be using below, generally produce two types of information at each trial: -- the action $y$, as a boolean variable, that is registering the decision made by the agent at time $t$, at the beginning of the trial. -- the observation $u$ about the association between the stimuli and the outcomes, as a boolean (e.g. `1` if Stim_1 -> Outcome_1 and Stim_2 -> Outcome_2, `0` otherwise). - -We can load an example dataset from {cite:p}`Iglesias2021` using the following command that will return a vector of observations $u$ and a vector of decision $y$. - -```{code-cell} ipython3 -u, y = load_data("binary") -``` - -### The binary Hierarchical Gaussian Filter - -+++ - -Fitting data to a binary HGF is quite similar to the continuous one (note that ```model_type="binary"```).
- -```{code-cell} ipython3 -two_levels_hgf = HGF( - n_levels=2, - model_type="binary", - initial_mean={"1": .0, "2": 0.0}, - initial_precision={"1": np.nan, "2": 1.0}, - tonic_volatility={"2": -5}, -) -``` - -This is a two-level binary HGF, so we have one continuous node predicting the outcomes of a binary state node. - -```{code-cell} ipython3 -two_levels_hgf.plot_network() -``` - -The observations about the associations are provided as input data and will be the sensory information the agent uses to update its beliefs. - -```{code-cell} ipython3 -two_levels_hgf = two_levels_hgf.input_data(input_data=u) -``` - -The node trajectories illustrate how new binary outcomes change the expectations about the associations between the stimuli. - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -two_levels_hgf.plot_trajectories(); -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -We now have a model with belief trajectories and we want to see how these beliefs can explain the behaviour of the participant. This is where we will use the decision vector $y$ together with a response model. Designing response models that are adapted to the task is a central part of the modelling process (you can read more on this in the {ref}`custom_response_functions` section). Here, we use the `binary_softmax`, which means that we assume the expected probability at the first level of the model predicts the decision. - -```{code-cell} ipython3 -two_levels_hgf.surprise( - response_function=binary_softmax, # the response model - response_function_inputs=y # the decision vector -) -``` - -Once we have these two pieces of information, we are ready to compute the surprise, which will indicate how well our model could predict the behavior of the participant. - -+++ - -```{hint} -The actions, or decisions, initiated by the agent are not influencing the way beliefs about the hidden states of the world are being updated (this is NOT active inference).
This is for sure a limitation of the model, but it also means that the belief updating and the response model can be processed separately. In other words, no matter what actions the agent is taking along the way, this will not change the way sensory evidence is updating beliefs. -``` - -+++ - -```{admonition} Exercises 5 -- Using the examples above, can you diagnose the performances of the agent? -- What could make it better? -- Can you try to change the parameters and get an agent with better performances (i.e. minimizing the surprise)? -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -### Model comparison - -When modelling, we always want to control for alternative, simpler explanations. It might be that our subjects are dynamically updating their beliefs in accordance with our assumptions. However, sometimes, they might just be responding rather randomly and not show much learning at all. It might also be that they are using a simple learning model that does not require the use of the HGF to capture higher-order volatility. We want to analyse the data using all these models and compare how well they can explain the participant's responses. - -#### Biased random - -To control for this possibility, we define the simpler alternative model below. This model just takes random actions with a single fixed probability. It does not integrate the data from the task at all. Here, we write our models using [PyTensor](https://pytensor.readthedocs.io/en/latest/), which is the tensor library on which [PyMC](https://www.pymc.io/welcome.html) is built, and sample the model the same way. We start by creating a log probability function, that measures the model error when observing the response data. Note that we do not need the observation $u$ here, as our assumption is that the agent is not using it.
- -```{code-cell} ipython3 -def logp(value, action_probability): - - responses = pt.as_tensor_variable(y, dtype="int32") - - # compute the log probability associated with the actual responses - logp = pt.sum(pt.log(pt.power(action_probability, responses) * pt.power((1 - action_probability), 1-responses))) - - return logp -``` - -```{code-cell} ipython3 -with pm.Model() as biased_random_model: - y_data = pm.ConstantData("y_data", y) - bias = pm.Beta("bias", 1.0, 1.0) # a simple bias toward deciding 1 vs. 0 - biased_random = pm.DensityDist('biased_random', bias, logp=logp, observed=y_data) -``` - -```{code-cell} ipython3 -with biased_random_model: - biased_random_idata = pm.sample(chains=4, idata_kwargs={"log_likelihood": True}) -``` - -```{code-cell} ipython3 -az.plot_trace(biased_random_idata); -``` - -Assess model fitting, here using leave-one-out cross-validation from the [Arviz](https://python.arviz.org/en/stable/api/generated/arviz.loo.html) library. - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -%%capture --no-display -az.loo(biased_random_idata) -``` - -#### Rescorla-Wagner - -+++ - -Another popular model in reinforcement learning is the [Rescorla-Wagner model](https://en.wikipedia.org/wiki/Rescorla%E2%80%93Wagner_model), which assumes that the agent uses prediction errors from the previous observation to update its beliefs. Here we create a simple Rescorla-Wagner and try to optimize the learning rate to predict the agent decisions. 
- -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -def rw_update(new_observation, new_response, current_belief, current_action_probability, learning_rate): - """The decision and update step at time t.""" - - # pass previous belief through softmax to get action probability - action_probability = 1 / (1 + pt.exp(-current_belief)) - - # compute the error associated with the actual responses - error = new_response * pt.log(action_probability) + (1 - new_response) * pt.log(1-action_probability) - - # sigmoid transform the previous beliefs at t-1 (into [0,1]) - transformed_old_value = 1 / (1 + pt.exp(-current_belief)) - - # get the new value using the RW update - new_belief = current_belief + learning_rate * (new_observation - transformed_old_value) - - return new_belief, error -``` - -```{code-cell} ipython3 -def logp(value, learning_rate): - """Compute the sum of log probabilities.""" - - observations = pt.as_tensor_variable(u, dtype="int32") - responses = pt.as_tensor_variable(y, dtype="int32") - outputs_info = pt.as_tensor_variable(np.asarray(0.0, observations.dtype)) - curret_belief = pt.zeros(1, dtype="float") - error = pt.zeros(1, dtype="float") - - results, updates = scan( - fn=rw_update, - sequences=[observations, responses], - non_sequences=[learning_rate], - outputs_info=[curret_belief, error] - ) - - _, error = results - - # compute the log probability associated with the actual responses - logp = pt.sum(error) - - return logp -``` - -```{code-cell} ipython3 -with pm.Model() as rw_model: - y_data = pm.ConstantData("y_data", y) - lr = pm.Normal("lr", 0.0, 2.0) # learning rate - hgf = pm.DensityDist('hgf', lr, logp=logp, observed=y_data) -``` - -```{code-cell} ipython3 -with rw_model: - rw_idata = pm.sample(chains=4, idata_kwargs={"log_likelihood": True}) -``` - -```{code-cell} ipython3 -az.plot_trace(rw_idata); -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -%%capture --no-display 
-az.loo(rw_idata) -``` - -We can visualize the belief updating using this model as: - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -learning_rate = az.summary(rw_idata)["mean"].lr - -def rw_update(new_observation, current_belief): - - # sigmoid transform the beliefs at t-1 (into [0,1]) - transformed_old_value = 1 / (1 + np.exp(-current_belief)) - - # get the new value using the RW update - new_belief = current_belief + learning_rate * (new_observation - transformed_old_value) - - return new_belief - -beliefs = [0.0] -for i in u: - new_belief = rw_update(i, beliefs[-1]) - beliefs.append(new_belief) -beliefs = 1 / (1 + np.exp(-np.array(beliefs))) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' -tags: [hide-input] ---- -plt.figure(figsize=(12, 3)) -plt.plot(beliefs, label="R-W belief updates") -plt.scatter(np.arange(len(u)), u, alpha=.4, edgecolor="k") -plt.legend() -sns.despine() -``` - -#### Two-level HGF - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -Finally, we can model the behaviour using the two-level or the three-level HGF. The two-level model should generate prediction similar to what we have with the Rescorla-Wagner model, while the three-level HGF is adding a layer of volatility and therefore could take advantage of higher-level dynamics of volatility. 
- -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -hgf_logp_op = HGFDistribution( - n_levels=2, - model_type="binary", - input_data=[u], - response_function=binary_softmax, - response_function_inputs=[y], -) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -def logp(value, tonic_volatility_2): - return hgf_logp_op(tonic_volatility_2=tonic_volatility_2) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -with pm.Model() as two_levels_binary_hgf: - y_data = pm.ConstantData("y_data", y) - tonic_volatility_2 = pm.Normal("tonic_volatility_2", -5.0, 2.0) - hgf = pm.DensityDist('hgf', tonic_volatility_2, logp=logp, observed=y_data) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -with two_levels_binary_hgf: - two_levels_idata = pm.sample(chains=4, cores=1, idata_kwargs={"log_likelihood": True}) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -az.plot_trace(two_levels_idata); -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -%%capture --no-display -az.loo(two_levels_idata) -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -#### Three-level HGF - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -hgf_logp_op = HGFDistribution( - n_levels=3, - model_type="binary", - input_data=[u], - response_function=binary_softmax, - response_function_inputs=[y] -) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -def logp(value, tonic_volatility_2): - return hgf_logp_op( - tonic_volatility_2=tonic_volatility_2, - tonic_volatility_3=-6.0, - mean_3=1.0 - ) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -with pm.Model() as three_levels_binary_hgf: - y_data = pm.ConstantData("y_data", y) - tonic_volatility_2 = pm.Normal("tonic_volatility_2", -5.0, 2.0) 
- hgf = pm.DensityDist('hgf', tonic_volatility_2, logp=logp, observed=y_data) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -with three_levels_binary_hgf: - three_levels_idata = pm.sample(chains=4, cores=1, idata_kwargs={"log_likelihood": True}) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -az.plot_trace(three_levels_idata) -plt.tight_layout(); -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -%%capture --no-display -az.loo(three_levels_idata) -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -%%capture --no-display -az.compare( - { - "biased_random": biased_random_idata, - "RW": rw_idata, - "two-level": two_levels_idata, - "three-level": three_levels_idata - } -) -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -### Posterior predictive sampling - -+++ - -Another way to assess model fitting is to use a posterior predictive check (i.e. we want to ensure that the posterior distribution would be well suited to predict the data at hand). This is usually done by sampling from the posterior distribution and comparing it with the observations. We can do something that approaches this procedure by sampling the parameters of the HGF from the posterior distribution obtained in the previous steps and plotting the resulting trajectories. 
We can retrieve the parameters of the posterior distributions from our previous fit: - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -And use them to sample new parameters from the same distribution and plot the beliefs trajectories accordingly: - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' -tags: [hide-input] ---- -fig, axs = plt.subplots(nrows=4, figsize=(12, 7)) - -for _ in range(20): - - tonic_volatility_2 = np.random.normal( - az.summary(three_levels_idata)["mean"].tonic_volatility_2, - az.summary(three_levels_idata)["sd"].tonic_volatility_2 - ) - - three_levels_df = HGF( - n_levels=3, - model_type="binary", - initial_mean={"1": .0, "2": 0.0, "3": 1.0}, - initial_precision={"1": .0, "2": 1.0, "3": 1.0}, - tonic_volatility={"2": tonic_volatility_2, "3": -6.0}, - verbose=False - ).input_data(input_data=u).to_pandas() - - axs[0].plot( - three_levels_df.time, - three_levels_df.x_3_expected_mean, - alpha=.4, - linewidth=.5, - color="#c44e52" - ) - - axs[1].plot( - three_levels_df.time, - three_levels_df.x_2_expected_mean, - alpha=.4, - linewidth=.5, - color="#55a868" - ) - - axs[2].plot( - three_levels_df.time, - three_levels_df.x_1_expected_mean, - alpha=.4, - linewidth=.5, - color="#4c72b0" - ) - - - axs[3].plot( - three_levels_df.time, - three_levels_df.total_surprise, - alpha=.4, - linewidth=.5, - color="#2a2a2a" - ) - -axs[2].scatter( - three_levels_df.time, - three_levels_df.observation_input_0, - alpha=0.4, - edgecolor="k" -) -axs[3].set_title("Surprise", loc="left") -plt.tight_layout() -sns.despine() -``` - -+++ {"editable": true, "slideshow": {"slide_type": ""}} - -# System configuration - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- -%load_ext watermark -%watermark -n -u -v -iv -w -p pyhgf,jax,jaxlib -``` - -```{code-cell} ipython3 ---- -editable: true -slideshow: - slide_type: '' ---- - -``` diff --git a/requirements-docs.txt b/requirements-docs.txt index 30d403e9c..f17a40f8a 
100644 --- a/requirements-docs.txt +++ b/requirements-docs.txt @@ -10,4 +10,5 @@ graphviz watermark systole>=0.3.0 sphinx-togglebutton -sphinx-design \ No newline at end of file +sphinx-design +ipywidgets \ No newline at end of file diff --git a/src/pyhgf/distribution.py b/src/pyhgf/distribution.py index ab4487a50..3b77337a8 100644 --- a/src/pyhgf/distribution.py +++ b/src/pyhgf/distribution.py @@ -448,7 +448,7 @@ class HGFDistribution(Op): Sample the model - Using 4 chain, 4 cores and 1000 warmups. >>> with model: - >>> hgf_samples = pm.sample(chains=4, cores=4, tune=1000) + >>> hgf_samples = pm.sample(chains=2, cores=1, tune=1000) Print a summary of the results using Arviz