From f31441d4909831db10f8ed0660501a4604f18c4d Mon Sep 17 00:00:00 2001
From: Denis Mulyalin
Date: Sun, 29 Sep 2024 09:37:40 +1000
Subject: [PATCH] Deployed d44f3ee with MkDocs version: 1.6.1

---
 404.html                               |    4 +-
 NFP/index.html                         |    4 +-
 images/ArcOverview.drawio              |    2 +-
 images/Nornir_Service.jpg              |  Bin 31599 -> 41658 bytes
 index.html                             |    4 +-
 netbox_service/index.html              |    4 +-
 netbox_worker_api_reference/index.html |   12 +-
 nfcli_client_api_reference/index.html  |    4 +-
 norfab_architecture/index.html         |    4 +-
 norfab_broker_reference/index.html     |    6 +-
 norfab_cli_overview/index.html         |    4 +-
 norfab_client_reference/index.html     | 3590 ++++++++++++++++++++++++
 norfab_exceptions_reference/index.html |    4 +-
 norfab_getting_started/index.html      |    4 +-
 norfab_installation/index.html         |    4 +-
 norfab_inventory/index.html            |    4 +-
 norfab_inventory_reference/index.html  |    6 +-
 norfab_nfapi_reference/index.html      |    4 +-
 norfab_python_api_overview/index.html  |    4 +-
 norfab_release_notes/index.html        |    4 +-
 norfab_rest_api_overview/index.html    |    4 +-
 norfab_worker_reference/index.html     | 2743 ++++++++++++++++++
 nornir_service/index.html              |    4 +-
 nornir_worker_api_reference/index.html |    8 +-
 objects.inv                            |  Bin 751 -> 999 bytes
 search/search_index.json               |    2 +-
 sitemap.xml                            |    8 +
 sitemap.xml.gz                         |  Bin 362 -> 372 bytes
 28 files changed, 6391 insertions(+), 50 deletions(-)
 create mode 100755 norfab_client_reference/index.html
 create mode 100755 norfab_worker_reference/index.html

diff --git a/404.html b/404.html
index e434caa..199f3ad 100755
--- a/404.html
+++ b/404.html
@@ -846,7 +846,7 @@
@@ -867,7 +867,7 @@
diff --git a/NFP/index.html b/NFP/index.html
index ecce658..bb19c48 100755
--- a/NFP/index.html
+++ b/NFP/index.html
@@ -1336,7 +1336,7 @@
@@ -1357,7 +1357,7 @@
diff --git a/images/ArcOverview.drawio b/images/ArcOverview.drawio
index 83812b0..6ad479e 100755
--- a/images/ArcOverview.drawio
+++ b/images/ArcOverview.drawio
@@ -1 +1 @@
 [single-line base64-compressed draw.io payload replaced; old and new contents omitted]
\ No newline at end of file
diff --git a/images/Nornir_Service.jpg b/images/Nornir_Service.jpg
index 05faa5cb3aa4df11995c5f09e93f9cc753a3abe5..5e5b1c553a20bc9e10a3644546c3624a619ae0e6 100755
GIT binary patch
literal 41658
[binary JPEG delta omitted]
zj^-r}=CV2mTbzU4oS!)3XgYq>QxH62mem=&1tN?ZZ1mGqB0&oDp z0DL$I47dZ}2MGU|0>}aIFZ@hDfAMkYBEiL4jp7l?4#R#yQ40z4e+mq;&@kP;E%KY9Lp5= z6aX2(_rj-t#}huNv2k#UT(*u0FgVaLxTtu$io=_N>zOonSa=Up?V@VTc`>Ujpo_|e z6r1<|&f7mPAJh1Yj=-EvN)zqYk8eM|&8^(?18@ph#k{799qHBT8%kCw5_FN!}fS*|o7AruU}jxOZ6heTsIB)@)ZzfcC@b#5InSw1@sN zG4Er?F^86=-Q5#SgIMPYIR4vrPap<;8ga|oWyxurAXlm{!ozarB&ERx2$R=!Rbb7W zVRqVPms+>pGRSIi@*unvi#nj6Joi0);Nb zTE}`Cl&+5O93HIowrw?Vg>9(2x;qrHC}!itfG#NCOwPFdN}FRy^&rr3lAWINxz>!9 z)&yl-mB9UysN3|N1-rzSNJa+_)$-`t`%|p}Y-qK*yisGW;BG{w!f?YIsk?u|*?)w> z<3`Ucq-P(V_`$x1UH982E{HccbkBeP1K^i`$o#3I><1wDlFpx139Z)NUoACDz36jK zojXf6E2CV>=IkTeI`I?gKWYM;BITZUD+^rg{{HE6SaW*@i`f@yP40QCvZwPK-`A+e z{K8K|e*oNx3;wLB{|Fav?B<@|=I57xeEH;-jo$*bW+9!%bxFaIjS+QQSn+TSPep?g zPhO4v_kqHg{93E~83dxC5M>e7Nw0X2T`zpFSF$x0Oj?WAMlXk46awYc#Yy_tp
    zE7^Fv(ZsA;8gp!{;2pZF%YWnxk9$C4EWO6}3B~>;(LRCjDf%IA*(r&h{Ca}ONbZ$! zsJuR$frPUt!M-y7R!3OjX6$%y&k04+*v#bx71_@&8WL@f_JtE>=Op?f)h>#^SBIg- z6{QSv8{nNhm5*LuMmdOep|ow)&pN?3C%*o%{hz~ZTuxSppD_b6bUT_V{+SKcxotyuhl97LfZ`{?*kog9cvwMe1EPBABtfDm@BxgMt4lR%d0*7I z4p|bF0OLP;-egU=qoxoVriTzs8zDe1e!h9n%I~v%W`JNW%5~L4U9_x-mFFsvemEx0 zXUnF1Kw9_H$L)ln^2T1>6V9$iM(&pvbQgK^&AgI}_cY=?MI{l{8(*AseM5H2m2itO z-o4v;Y<8vS8`biINP4*fBcdF1)QzJ3_=L-g|PM4eI-Nfe$C&<%>s%>nifZxF&p{SJ_#&lJjk9mr(Sgij<2BoRcLvp^Ug_ zyf^2F4#zhX{J7hL&@RxMCh2EZNw{9qqWSri9c8Q_MMk`89b64NkiipC2$A7W%*-(g z!Pj$sPdl8vB0+!@Pr{gnq_1=~44Mc&UY$zmUgZrZcmUu$`5ZsHKyNGjfcw<(i@oI- zY+sA<9xs2ML9Zp1MzTg$NUky`NmM3^oBDBei-PN5=V_YvubryiY9hE+HnlwJKE z5BQ&1Zp`E%5kmKzq#%Vo!lpVKCx)@rlUL@n#vVW*zEofqJpdCEL+boVkaBp`A#w#$34;ggL6kn%0w_S7A3DOlo`Z^e6$VYZZ;W;kXp_EHNfKQ#yMbC( z$br~?buTXk4rpCREi#tb_t^8U^c)E8Ormm1*{iwY;%S!PKDb!~P^wwlZ_Wv{>`8T! z38=+ZS-HgkYv^ggMa^%c>%3dx)+&{pa-714YQSL+8q0g*ej#bhU-K8Wjh5*66&+O@ z*=Slzzbvw}qEXS0$J#uyH0kK*oE#Jtm44OE8TZ8H> z&Kf*}wyI7#45q`>*XYQsg>|c~3#8q*(DANIC`ZuB^7}i~4J0#8_Hd+J?3o zenX$d_3Dl}D=0WV(ZPqooQL~+p>-wHlc~MRQDG}^_v!!8oF|ID9OSYV`NGuW_$g$4 z=`84CV}bvG*A@g#nrD2wB{zn+?g$QLg9e9(W56<^WL(&4S5uy=HHbZ5pynp3q6kt?|jR~KaxhJ~!UP=Up5_Go(b4BDc*ou=Ro!H5Mk zb&DMaYWg`mY>Fgw{I4nSe^LQWIDNY5MUI=_H}rgF!t=cYhrcjH5h)EN zbu=_yE&>3=Kit}|H+GrqCLFr@u20cLRXmZ1TsRv#rJ2)@0LsIyrE}o$hY~k`GQ#R@ z=G)3IbIVbqY(BK_j%A5WE>x=L<10(QLu*Qh+szi&GZIAe$DXvoXusJF`$a4L0O&E83WPD2S@^tMC5VPDIN1%t_1>vODJ3`Q}$(L6XCBB&!2#R zRzfa%C4W#<%=wH}?&vpPe+u}RCQc3K+$_oCr)1X~-u==vFFd7|Q>!(cYX^Z)(4jMA zSs5e5hd#3$FsekKJenaefFM-b{jCt zjLlH=H@Jpo|9o8t`pD(VmCVi! z%5^hUp5#%>SL!+8A@Hd}Esx;v1F72ehpfkyd&4V3Litx74o4W5N(_cNq#~<@duG{p zz4WE0(=vsIs#jLO>=L;EG3Dz{?g`{KR_(Ky#Q2uE;|pEKbI>Sgm)w;nx%%y%@>z&V z_-i3v-h2$CkVjfhaZa}7>w1@GgJ&&OUWl$DX1e%!D?v)sCABXTg@K)trSxvn+7Ozl z^mknydwvinVo`&3)7+20E-Yj3EF?+6T@r}q1V95eiI(d(`zgMge$uOd#P|@q!sMOt_9Cgdc#(Lg& z+VtzGDm<9(xl^hB zS=hU9h6oJVG!+LN-u*fI7xABw*TXcLIoEy5LT@kNy$PDG$e)Ydm(f?i^P?QLni+?`{$yGawK7ZAysQ%J3cKz9shywcLrG4VM??~Uf z7B#bdY5Z}dlbaVW<`W+qRp(w5RKROoOwl?|H0@ij8RygSNv-yH?%S-#Llfc)q>m;{ z7nZ@}C&zbfee+E9bQ+2yZr4=4Fc?kJJBa|1Lu_O%(%un0Zd!|7er;9jeH*xz7A}AQOONoaSQRK0()%kD|IUW!3cTFN5Gg{C?B!lsP18p*u7fSQUR{y2&;V2 z)}G1>TEVsb!%E0D|C~5^wN5S4;TXy|dHJkEr7uaOt{8~}YM;WwzFmAw-bmo1dBy#= zyh6QWQsM$+iaP9dnlq5hLt&Atxi5Pnh&8yCizEdXsq02%5~-b2tQvTP8nwRNuT7MI zslKN)Nx!;q3LeLzT-l_04h00R<@4Xt9seTlMptA^X~ogI5>Hem7fWPqsH|yUfmM;c zrXqc?#x{p!IVozZNM^XAVk3zx!@4Z4#anT(3s1N@bwXJ9q^n~XirHh>X)SeWM6 zkj@)BbjXD!Fx&gc3EvUS2l+T!?N9RZ_#b|%2)y4uH?ESJpBC;}?f7w=;2tV}cJBUJ z=wBt=nOtJaV4%78yVSnZg(7wnz8;4vNRe}%;{oj?>_)2WApME>^S}uqdd0cvB_+Ar znGfVt(vP`9vA?hJCLPyM1@OCVjhcIK zUR}-Lu|UPiScx5fw~=>R(k)K(1i~1`WE(F0z@={5B@=j>%YKRLGCF)|m@Zw$EPi2h zU~`=pmZc-+IMy@O%T)`+NmSiQ%$8C`^xm7_^+W#8S`UJ5C|3a=gi|G!Ym%n2YZ~?DG z#zi{(yT;O-!D7pIp<3?7uyjZBsCc5d0#6U4Z}Jmr83I=3=6qCJ?QFNXCu|zyZ#CP_ z6Ws=Jg~6ZxzPA73{^EOGXGH=pYg&Q#<@Wq`Ese3^Z6lIZ;im(K0vyca5#$)CfF{eZ z=yL%9_AkTvDnm@56vIqLd8=-SGn?uyec0{q+`T^l{&q=#(Po%BxAYzCvlAN7r#Ae#Nhp(PB|wF=L7&M~ZlkHpKoz;!x?VUAeaRaN;grsc(C#j<>lB{@yL^foGf6ZA$I5_hn?;q^=9!2iV$^lNs9Q zsWEd7-%8$KsR>mKJ`mYWj&`KXny{i5bj}jEqUo%aVo$8;EJWwbK&qil7FiQoN4RV$ z?xErb%80``GlaXLjsZhGR1}!oO`e zmGTsj=4P)+%kuoQb4ZN_OQAh@-9vwZ_mPoSHYC|?S*lhFBNbkw9jH#cr=;$Z$k$iB z#XBoz^%PN@8}c#1*0FGZYe1jZaXE7f=_pm5m^LFB3Pjs^0okMb8qcja{PPRZ?&W{7 zY`yi892M8qYLA``(=zBQuxDF*MkhA*gf11zL~tk_DRw<2R8M^JshwY1t!)>T)1AHp zH-NrMtsmNCR6lEcy&*)GDX|$xv6B-%=>IYRciwg00$fb#&)@M(8L%8gnp*9^>WHa=sCz+YcWEYALvsxo3;;Z zyeD^OrWn?}o{*);{fzy=cQGY6iI1ZB;X&ew2e0Q>xnIKamzZSC%!cxSPYpz)sKdh)2<*cq7 z%Ikjhw9X;R2o+DoC&D;vv#Hvh#y}a|E6+R-%^aw<44%vp_7}HiGrC`;rB#ZJ;pzzQ 
z_LFWq3if^TM~=?>js!?m;?wU9ojrF=Y%NJlP%QU2aUZoWb%aA@)s(GUWIvQ9AiC?h zFhUgjtmAt!&mzG!#=$;rUHqOmK&9`CZt+fcRS^xr8-b3Lq2tjMl;RLx)D4~B!Us4p zXHnsk+w4_j9{97tVy52G*e0?fmO;)&`rd`%PWV!!kH$)cKhPK1_G;C{>uP&EN_O4U zCK}6b^RXALW(>&WoM2I$196?L-=0Lpp{*`>?&o1MvJE0rKOUe;O7?yLF1MS{HUvB5 zu~AW9;?mJ+$YL)Y?TuY!{bbTsfkt(*RDS&9VEh&X)T3Xs-)QD+t>7gZCaE2be<9>5 zEQI8m2%A@zjSx(^r&eJ;V%O6yBHa_)V<~U-swhM+;(G`7^YNRYZ`7H91Ng96sB6&= zfS5%Sn%q)N;V9pxd^dq-E#{}c;oN=nZzyq21@7M<{^yEPsj3*Rnyr4_TLliBja{j% z$WL>-G*w%~WTA3(Hn@8Y`cfU0QxY#O7Q+i-W7B&-WUlfr_S;NM*2#-TUJ*Ur$PK}_ zCH|0ArxigBdNzg`+kjI7KHJ><2o zp<}SEFdwI2D}?N(*!QEdrm-MpO&4V{cl1i*68}uqdqKVgEaf0Ha8^Hw8*7_m!yD1g zDw~v`Jb=Y$r!IK`l_BVzv;FsNGH+(%eSqU9gg;$1W6aln0PsC^_Yz^BZvMMW7HZ7= znEsY~X5m7XUg(-x8F#YoNFK;WC8;UTD|rUz|BN%BB%>I{WWNnyaTRK`SgPBMpUTZvqtU{m5RNq=Lh7cZ}0zp9=_53 zhDP0s1$^ZlQ}!-FHz+zNT29zF&M3-aAxjy8LPy7)_!}oPLUnOgA&P`0MHtOI=e@cY z^D@d7YXMA2Yq~wTDxN-R*qRIN;QY=f2f!5HtlQD3N#lE*sV8iHprok}c--d~t(4=o z##fPCGl;A7^PhU*-{jrNAH6c!6G;XvKJjf3H+|h1rfW^pDXcTJ%;S^Is?Vk_xczxX zWwTmk=jLeXpw4#adOEXy##%ch$b`j;ez0c6V@fI&(WkBbShy^;Ot@0^Ll#)DkO&M0 zPrG|dS{=}5W+gd1cC0PZtA3T!Hy#{3hm^3@#F_GTWA@g@UJ~E=t@1o~7yoE(GpuYb zmC$d%h(edLl;K;9%hc{O@)I7}Uo6@DN6A4?4Rqf(JWGY`*dqGH3wG(M5Oz|6slt6= zoTs61c7R*SQmMBmmbaESNNY$>urlgSNPewRV(8eiK<8!3NZlUG5_ZbNrOm}c_`)$p z3Iq94@p%a^{^?urH;)f9QuTUb8Me|_B8*w88wzOYu8AUvYP{kZ9O~ljd|^{CI&pA` z=~Uce1U%jrW5Qagda=iDiBV1;q|9-+vRRLH`rO^D_Qvxmf6lr?N@4I|Y=P)N75HM9 z!4`EDW4UZ3T%+^e?q*(!5PQPBo0bi4Pc6$pi%YTMKKI-8G_jEq0q!)qvYAiAi33*R zL&h;qg(P*^P&Y7d4`#Z902dK>lm6YI_NV@(?`r#zk~k~qZw|Y^4&()0mqYFO=9II< z4SG#o%Hnct2I<+{QnSI})1s%p68+(d+P>WCEApL{0hLZ{n?<4rj`8S(YIp+uHDlyLrTtad@lE`)`IUH)Q zJ-0#=LkfnC>0Efj!&_GHNn%TlM97&E6IDk__(Qpm5oUY?-Jc3Wm^43Ekx4orS^9FP z1%NJmFS6q{BFV#y);>SBWt}NitrG7N8-ZHNYwN!dyP0O3mg%LLSv8@&I^Bb{AHH;v zfcdt;UT8C|mqVmZN`EO>};@fD6GHXr7=oZ5F-y1CUjX zb7J9BcJ2$@Xmye_Bf>gpBP+jnF6qYshKR$M_rb(^_s$1m?@6Bcwk zPZA^x5fzTQ>*d=N3-f`XS}`gvG?NgqWdT+j6_8ygyFyiX!O5=ktD1z5Nuz=52Vv`W zNBKpGW9SR9QPObN!$F|3qb`c2szV)m591g@Rvn+))X_1lIEcYKWVEzq%C(3_Zdyn! 
z#%wxWnpb`A5=i+25XhM{yd5KH{S|4?F<%mAv&4Js_DM?oJLfj!J;7QJ=j|Z>$$kYF zS6fy|`Qa{)_YxctT%gPRq;+2$`ReIN(P(B7Zd5iYvWK>nPsF1nL6j}&b8fsPd6)=F zXMCs`4BvBS=u9YxtOZpAm$EThT8j=q8}^uF<&&dCo$%KM?g^z&_N`&jUW{v(5NYhl zXcgo^af^wAs)(8d`;0MYZ3w$z&D;1n`Sk)OD^lGM*vsCJfF&tl3%?+hzvmp0C{axu zL>}0tqHXOjuIpF^uj@^1#!i`UAS5-c%a{z2_EWXmN2y&WgAw%H4}53gqmc1|kC^yf z$%RgNW3HHrElcklo123ro?fn4Snfdti?Vz#pg#x5y_1v3N*NsWb)Uwbx4MsKe@yvN zP0LEalaUvNpJght3q9-WD^tSaHt{ zXRdu*<-+bmdPT!DE|v|FuwopuX|+rTAS8H&v$zW!%f$M?qOKf0wI!d?$d-vc78w=& z+Whp_)f{i?Gko-Tne;HZ!W0<-WI;?|0}73k)x6&i*NR1aAvLN1K`)brxe+o)16mM{FSclxfxe@*iD4y^Gu%|k=<0iA06!t{zQ1IgqzzTHkfN){x z@ehDJ&S=T$^aB71uDv+tq)z@FUit&@F!I8u-_&vb252O5%DPdzdVzUQ&mR|UfQvMM zo4THTl=~j~;b~EX&!Vxakc5_Cd+BGI@+cmWm#-~N&80WgGBN4aTIToSD*9W)eHemIzuiPO(Bz>68;^KRNJ zwHd2P)5i6*MZ?-13l{y$eDQpS>T$22AsiGPT>iGphPmpw#H;pjIW=9Y+HBSl zx?;9{bP(s!YdbtT={_9hdUkQ?y*~ui-6Ob=!sgXT?I70FA@#w_EwXx!s)<3t%*^na zveSLLN{B%ye_{z`AYEnnWSWmdk1TCB#jX4r&X8=wT>C`0ioT`EaRSDs>2h#uTFj$1 z6)|9-npvc5XliYGk!H6(H#@6^1*3m1lR|FCBYokshULm|j{3mM8B3mhvxUp_bVaE7 zDRZG{laRpGiozWI@yx?ipO7n-v&;^0u_5dQaqA+}-S$1MLd=0@Q~Clu8LR;^B5%B9 z)L-fh7{2R%5XQ%w{aD$s>Q-i#CS)0ifr^k0GlL_zg&Zu3lWe}jawZkx))QXMyDu9q z_PX}8W_L1ITJO>2f<%}+Nnln@S!@%v!Vr3FhO-CX_6&go3)BJ%-1u=5 zJbELE{ERA=tp&SR2WeCbrY95^iL^Yf!Q&G_1p-pUK{0x3G`!jg4;}AJg`TV|sOAN3 z@F_-aAiA;Q!Yo+tZOm2fWB}Ax4gndTMP8u)&!+=JTb|t zJZS7oYoH@?jxHCck@w0(3`gLCYE{F-gF8CB-kcZp{{TL(Wx~H2z1wzw(y|Ca*h`K` zAsOwh1q+F}_;K-2CowU{K^ta5?Rk0WV=8;=TQ}0PrrJzIw^{2nJVO(eKRB)IpbvO4 ziZ)Q)$}}SgA=`{%YWdx5*C*e=#V-Q~YAIPMTebADo5uE}Y<@AXuHF4tJmMmO&+V*O zaW=nmYU`4Y9CC~Pp$$`=$vT0R_*?g-Gou>}Pl_sSp|$BQwR6U~z^l{Nik2Zaf)r%^1E zgCxv8*b>M&yE~6D=*v~myX=9k2&)NM+V7uGDKCFzBN13P#MbG+Q)J#UW73m~U_X^( zHS!=Fsd$QSJpXmRfLe1yh@SV`#Wb~`gx((DowAA05Sv8iw`pac)yO2LtRJR8;jT9N z67pposBF8}^lx*Grb@BMUSZoXYv)2oFm@`zG2af+Y_Lbzwc;2eU=p0|q0aPOSsAK= zg*Q73-vr2i5}e}89rR8lZx<9$xP#UX?-Qc->g&s@f}-TtErC~AQY@B-McEiTP z^PC8~aX`S=!k1KnWGUR`;$5k}wJi1f>ID26j@*MyXereF>MB+hHPNyGYY|fRLCUmF zU_H&kr{DYjetTB>`0JcY(@`X}8^=Vwyuh3KBsOcJw8E&cs50>8pJ{ZdbIL-kF7-}L0ew}F(F0!FX?;VT~FWr}x-KjdF^+v68IQlOd#RMBaM8{p`)SOi?;UO%D+jQ~?aMmJi*NKmz zbC!xTjzKfaf$5K5o(X?4)hn{{Fl9my=@@@R5468bG?>h+rwCXa2{GodOZL+1uXXPw zbK*pg6gwd3lH4R^1+RRZ()eyEdy3!6cDISN8n@Yj-+uLrf9q<*g(Kl1+z~Ch>CHCb zhubggWscETHh=NTo+mz6$uCaXbF}2=>UdlE6yNCV|73&S(s+9pyC1{;J^66LW>5aNi&tH zuyTjt8t%OSSj+o%V`!6yzMlz`u;eXVq;m0Uyxaa8?w7y^=kDL0C%jPfzfq9i^n3uy zc2T`_iM3^cqejm6cF)96L{lE@m^Sbvz=_`P`9Xryut$sHB z`b$sjw@#9$fQL*ib+w1z%6D+Pe!cUsz*oF7Z=p3B5m;8z()*POpo6>RQlc^I8t=kx z_E+M^ugZA8{Q%gW75j}uaR&5{^U(bOtoltcQQxOuZ6k|WbZHQ@nF*t^i`z7zIo_k3 zwi~oj^J+KVjN(`VHJxiyGX0kCas(V_(~{Ab=6-pH+}yVM%%V?}zUnx$%$kH~Nj5MF~%yr&@|frL+#2ke%MY z1+ur#f_=DA5ldE>Bf&drq#=-oqNxT#*<*?wET)4GrVrRg9Oy)c25u;hO1{24RPW|- zBi%2dY@PGDZr#*PRFEm#BHqoJ@e(vYHm3qp@I+z?DIJ%Ry_2-j^_VG%KlX} zKXJjoj2=#$3@>Ze~uz9Nc`GO+(Hm!vHjVmga@92G}SRCbZCp7;L|RKh;|pM zM4)r!y^A1pkB0+rj+`*&dYBT^jG7yQK1HzjRdT3jh#rz5GIA)b zC5pQCh9S*zGdH>Y8@;Fh^0lCk)c61OD=n9F{`G4?c)ECDAL-*mY~Tz;M=1xEx&o0H zZ!d9=c;rIorjd6FQm{SQgTP+8+u!mzn@R{+fQ{}hZfeOdv-B45r`U)yaAxR6DuFa@ z7v7tz*UjhDr#|Tf&e#)yg-Qjgas&VAYHQ3rhSHGHuo#}U~_Ndjt{9TnB7u% z0oPB(gof&4kFa}(H!W+DmUh5G%@17qZ^JVz;y57Pz{Bub9p~)2vcr0TT^1{}YKi6O z@i&^D%)WB3$|&ew5iG?bZRm42h-%~p=nLKEZZ8kkXO81I3DBCiQh&?9U@uQFh&So; zU^KcSDOO8ei(+RB+3Tbg5aY!3og}}%t~B_xVP+I*oA1QA?XtFYx7U`xa|v$i+pv-T zpi{byOEoq@WGeVGakjT9(?rQ1@%Ifui}l6^Qf&foiWYL&@wCfwwG^9#`K7LSGW7ZI1# zj}l!|>Bb=q!RB(6EF?=_8H0^pMbq$9u;DaNaN z#^50F#;qR^j%3y6Zkl#vsWGa#ZAx)34uWfo?>)mvji zT!ys#Up{tupN5V{z3!c{;=sCr{b(PgFOiH^F{Udj)^7CGFK@dDg+u=V~PW_f>Azi~n-*Hf3jw?cjGp#541uKT!G?>$!HXQaQxdiQt??nruG>S08> 
z>^s}q%8M^uCx^U5oc|e8upJ?jB#hQf2*mqhOY|Vvdf} zV6I6{HhNMGN~6%@bp*~hB>p)EWhQK2Lo`~x*VzOFs9$-qM3SXhui$I@?BiAL{+K!K z4+`unu4zTR95xT5t-~jFw|rc`!bFlY@{-zBOaqp%LE*zoB8*M>I(@Z~t(~b|g83EZ zw!nI9a7VrJA$a^U^#7J5o3YAKTu18UPazWS=QBFCQEK_eIM@hC`^%fE_PB7B7wm+( zqsauE2xg1k<}_0}({aTx$yqHKJKXH$fgFd2cU*q)*Y(lXUlu{hrvUT6zEFIv|FSX? zHUjV;{AJC&BHIr}8}yd$p75gO(jFWlnBv!T-V3)$1WBKu-J}{(Qh7Ol8q>z(EDi4TZyya8O_1Sh1au7{Wk&E z;kR}{@!^wmGFcOGVk2K%HP;@*t2)_fV|^+DSUt0Ir52^qJ#yVNUN6lh|8-@Q@brI6 zYylS9P9_PrZh30c%O&)eR`uYs7Rz z&Kn@RExjpZl05~x#Qof4P75;jQ$Wl|&{g%Rjdy0~NXvP0!D6;wC@@?@k- zSt_dRyM5#K)34uz+aI772R?(fI@#~~w+cO=O3SkMpjTS2)LUQS6j_-=8-H28bM4QL zaCqDu-`9O4&*oOl3>Kdk0S%dFH1ac2&bl`kpPc*GhKt3a-y80%cl*sed7aQ4K|1^c z(17!tgf=yZ*r@o;IM$c?(N$@jww?Q*hl{`QtArN(Hk<;tZ^TJN;K{y4^1HE0w#rcn zc;{mJRd`{MNs`ihVE+?+U%#Ffw;e^ zj(Fm%mU+V_b5k_y3-n(312y~Vr(6P0akc+G;bnQ7ugxMdwnOrr1V`s5&-%&Vlva9s z#ka_5<-JdCf%3Fl3v8y_j~VMtF?(Y_*E1cE^G{nb__0JK%0!LUdr|U4!zRkutauB$ zH9>Zni+s+QTp))v+juX#r76GRn}WPu@RWW2Q2+^a+C@D_k_KBIj7!+u1v-2y@@@ z&=3J{qENCJ0Whmu^a(}u_h)Qsg7bB>`}QBNPnshfMECbBiXJ9o99wrvwPL!f*0nt8 z>YxIAfuaws6%$4*Aa88;#;AkBfeX<(CE7|3c;YIYM2H()9LMz?@y&c^txC&y1$2Ee zR>Whe3_;jgE}<-Jo*tfu2h6Tr-O5TFYIWz4kmw`9nNHF8cy4OKc+5HqZNl^Qc<(>{DozEW7q)gQ0HSRxkV$kWQ=%g9W3Oh@MQ#zAy z*NUmS3dW~xs0HCdH3g#(6$BsJDYpg$61d0IZp{Y%B!0hoL!(&N0J}-c6k4R1Vfkvj z#QgH5+U8o8wAjxOv(F(Clntj(pFN?7Ku`+$*6KN_ne*mzldvVqXMPku^ILDS@l?hAQKNh>a@CVifVMKf&tm1dkaf0 ziW`bbJzc#ZEe{BfCYpTBg-k{+Oy_-OJLsm4o;;hX`+YW636dBz8pUvrQp*_kGU*OF ziV8Sr&(pI*d{%X5NK)%yC+CC9fve6yu1fGDGjY=V)O)2(x`$zL-LQ4jyMDrTsbh^^ zPG%fREnt;YtI=-=!IB4+>cvAAUqyVuOfqEUuh{GD6srVzth8%E`A7lUF@81KF*2Ur z9?yOw7uu`k@1OLz_J&Hjv6&cH!FWTIHR~smYh`s5E!9BmHI&zE^jQ^0mQIZG#A-$? zA!zHq+dhw>Ic1eTY4RRg<{%zm&V)-VeG1r+GI$U%9DJ5N5*EJrq&y#SXZvK7uVlhf za*88MI7BPiHl`vvsO=0sVLv87ZoNAhbhwE@fvw=_7BSAXFE~7B&J0rd)xyB4vQ|vaGw1|k2 zO57?nhM6Epp%*%a!h_J=&FFS^J%^Bfz><+h;N;?PiYHoIIFRS-$IV!XD+w<_AO?bV zQ^qpAkx7w+Y|PeZbU#^_d~9%gdAR+pt;0H)(6G%lxyDIsj0w8Z8D;ZA%svA?yA6RX z^Y)yGD$3+ywOvq4o`}#nOHG(k}h_jTa2TM8?WQ7K;)@tyqJ`2O_Yk+78~y%M=nqIKoH{&bAv_X-}}tJId%U@ znrM;Qs8j5lyx>LuZf?{X<(%_bgy8cThdN*Q%?PEHoAh7ayHOgsx0~y-d~8zAr2em3BR^vnuk967!Ado_?^N8BJale;fsirZ11+*vh>9&Yn=c zy`Ji(7^{S0_6*c-!Z1TSix492W+$98iEA(UHf=i7M7Ij>tC1Z)^m7Ai@W9rZ8t&as57@E(lU zN(I9&7)u0~5XetZU%z$b+dVX0n*fVwu0_70b&FKoRIKU{=@qu=2x9j*MWuS0_6zv$ z9y1ux6&TGW&nNrv9kSL8+qJzmI>;Ikoix7(qc1{CfJYBmjb+V(Skh3onx()>H6Gb` zEWVtm;fLVVTQ-ro>$WB$>rNDvYmI4~?4E{7YFfEe#o=nL6&}kDYcbCE(3yc6GxWg@ zG(^Q%uYw(*-ooKh@gqWDUw#_hxbTxfs|wI3BxTSPBtup!8X~|p-a}x4`+r2bcma@N zuHM}+1#0)HYNzxx%N#E+?O0ZjqB&>p7R}^xp+c9Bk&@1r%1ZOUzJ1{WfB5?{`{RX4 zWx@e;qcBRjbKS_H3FfjhT3?#Wzbw|F_G&))btF%XEu5)KdVqn}y>@+R^2IZAm!UEN z#@XkAmyFQzYkID#ZI;|=V2+THx-2NEB^UYX1L2nJjq!Q`#gkV!DyZX9N`!3M%E zaR}wSeze)@5z8t)Kn{Wja3a``gAdjp9DQ1(uYa+$|9QODq0JWgm;?4I7RHjGD{kjz zVz2zLzQ1NgI3Xv-EdtooHXJN)=lY0xo^{)IIosd?8v+r8m%L&zv?HNJiVu%s)AAH# zSX$O7(JI%SQg*^@Xb4{KHhl6Zet*z70Bn4GO?V=TrPT>D6ec2ezlN+*SF1?7e4LlUuhoic1$#RX~vpJJ%=R1GSb?x~hnNNO= zIWp#$V~%Hx`v&B9^EZK!1z#OW!|5ZTPwcJ)J_KSX-XVAf?f z-NPI!FJAwtPO(bU30a?CuT@GX;rRYb0z(}E-ioz6S`YN^+*b6NSm zG5jE3=z$v-7f2j9t6U8V`{b5>ObMK_%Hq=Ojik=d_PrDL6p%kuJ2zi{w;tA8uqfnh z2XP$wcqqby3TM28cu9P9)U;9K65os}j%j;tQ@UjuTk_75yrZHxx|++OvyR(y9crTr z(p8`2iOmO)V2VC6){ul?Q04|!K7kvbxlmShH4dy#FV_^+1<5n}LFc0I6e<##hz)kp zb5YJyG6&D)>gZW-fH?uxn=(DhVm7+K@xwHSaGip|vT?&RA*S+6DZxEJDuie_MQL|i#DeJ|$?$7Z9RV&M4Cip!w zfk|Do_mhZl(EtGuT?pqQ8B<6BT#o~rTyWM-_j4+>e7CHseBpbLU|)NSR#NW4%_Y!Y zc$zO9sxubwk}PvG!9S_`e#4k9$Wmx<|L5+IOAyVm__uP#LN)J9tz>Ed9qwEmri-sl z^XxsrP_#3UuM1qiITf>dFxqh-r>k6aYExbt&@9Pp_Du<&HagmB%p4%2$m2hU>LS7S z3N$6@acXRU(5p}6$#2^6-PCI`P*45M<)FmV(*Hgf+-@Xoli|B2(0Et&xTmE#B6sr# 
zBG{;HV1!&!5C)A0SG;135!`Qb-cV8));>Ni;Z=vmb?Rr$0~-gSv8c0#1niO%`QL^} zr2mQRs`*itPMuY|k=wK9OuKJp)-5Spb2T!VxMLZo@R7`RR5q8>sIBq^E#1)$)1VVA zo5t*z7MttUr;FE9SQ#A6&8;VDbuPnGrSLiu-Fzlg3Rc0I<22Y*KW&R7uiH=OH_Q=a zWK%B`Y2JBDbVx{cRJfM$z-9p#+I9D_RF%=ZtYPDcvQ1ki3STN;#H<;z68Vw!P)Nsk zrP654(BigKnn|#HcHJRgK{&BP(ngdx5-@YHlFtwLDV$2jTFpVqLT?P|^1Z`-xN7jj zm7?yE-4AARtxBWnnsj>ekgCDk)vq8nr#+r&0m7^m3EnIgi7h&yV2HF#W6`N`(aB^S zxaWtc);cgSI(#+#l1?MhF2Qb~Rqm!d^R*rL5jNp)eLM!_&5FoZcb3@Al=jCpklC>a z2XJnN->e#PuZtM0wS7P}bDdlfaU8QVkzQsoB9Cp;z!qA>Q;}daVVaW9{#5ecj4K`g z2NCaW^X^8~B=jOL3L%XAQjSUr;xJN?S^SyxY?h4>sHR1^^Gg0|ZDGS;8r-`<$O(WZ ztRYX##O3$!_|*2cCxZV)o~zx%TRcl&(te1I!Wm7x3Z#N|>*nDgecN6~@!hM(7&iA@De6XCfUxmsRQO-yEiWl87Abz#EEqe}8(X5|R?W-zcHlX2blwU$k& znxIo9reWbV)u-aA6i`0Be*lkTh#F5NpeYLxeqq-FNsx{EV(_JjS$u4hq0|XbE3+g3 zpyWEdD`+(SJ&r(<2JBNbaHDG5TJ4e}E!o5dr?n5T zYvQ_Dbm>uW#Q|RyizQXs>dv6aH73UIoUj3Ytz~#LhtBl`M9z>lw!?!+bKh&YdDUo; zg-#PUH>uab=GncMWMtCE#r9A4F{pv4_r&nnPf!q%px?ArRL^~(ls&WYGcMd4D$T$= z)~XPKKo1%jJi!R4Axbhf^(*>;T2Aa{Y@RR_=kIIKDDCI=JzjSK3r|n8Xt_w;VVRfj z%Peda-G*KuQGKd^khw>2;|8x)D_DR|Ke*i|Bcs%!1M~YZ1)nb+TErc)1%ghI#PO^c zh{_@cYEY=cNv|`S&r}{4WKdJ~UaRBVGe~O7yyDDFRnMu!r_9nt{UW#!r+iBw-#S$L zgX4X^K%r7o^HiELn*QSDr*bL%W&Qai2fT_=>W}9;p953mlKPgt3Ll+hnculZ(y!+8 zHY73ddVuX($YQXDv13}QzII*Qn4f6J=upT$&nQ7tX$uk?fpsM!AUs?`Akf`bZD3fQ z>4C2+dyNVFmrP2ETFK#vNgRrq)kt(|E)ynr+vaPb%yzl|fckb^S_|CKlF!Y^u_YjpDQta|xVtc^IMZ{w_*4L9(Y5eQl zSxGR8`2z|6z`gLv&))of*7y~CI0^bdHjJpN# zVBI-(|J(58=#=zZcA?BWC-J13sG(mC0jD=<=D5jUkGNU!C4c?C=-U{G5k56WF(>c1 z?*v0egbVuzRVR4#_WVD@EP zt@kcRetMi1-#lmiZGxnI5)?Y9=N@ch)7Sz_hnTi|}-;c5ilZW6|Gby2OtfVr%j7#U9pU-vf85uXGLB8f! z##qFaJ%|E|K&qcBm^}hF;!-@p5}@YoSYoJ8W6<*1eHPgh$ad4(HI=VN6x2WeArcwc zwDqlSX;)~2?Uf1FML}PISmE-!W$)4_z^&H8G8gIbt;?3+vfBlZ1Yqi#7CO+aXE`Ur&P${w2=+7lE0Zg~Cpu)oVQpWUP1Py_B3vP9EB@7G(nD0R z3wAg2M%tRKyX3c>lD%}3zyKD)2`Jg!GWO%&wO7yv_B5oBO)Qv9Wm&DK63wM&iv)kh zSe*h>=muFZ{`FvkWvIs7gRRzFlhTV7yMHRge_rKmwkZ4SspQ!w`RQTyab2MfSMMDm z7O%GDa>;Jm$WKoEqsc=t?@J6EOK9SS5U!7sllQ>l_4Y!&cXzl#(M(T&pc{S<%=FsX zx<4y7%L!ePIzwvVc_xrbu|Kt*1w78%w#d3zBB3*B{TnXbZz^Tf!{3eyDaNy1jlF0@ zaD8|CMK%;c^tBtS?Ki9yg#q9^dNB00&L+8ISkch1VxDfK$Bc(zh>Sw{1*znkN#ZY< zPGv7sgJ`!zd;3g6vpj9pZrCs#-553gL49S|W9^;DRv&9w$Q&dNHyx(y#US!vlk%Vfj*7x@8vGM(AQ{>A<71nWf$s?+q@z-WcYeOLq>Xrb%)ow^tspu7XKS{e zRMgD&M{YcX9kh2SrWF7a7PAVEtlUkRcu(&Z%3cSDI;#sn-`5K30yf3BLTI z@MZQqBhcJTmbVJlatyw;+dxhY%O8@-+BMfQ@rPE0FPg20-Tv@@nc07QF;x}8N0iE# z#%c!-k_IvZ81CywZPtslI0Rdyuubd~FFeZApgW}@oG~Y_J-lj@IZ5mJDUCs^tl295 zA=cGVvtN9#BlbGfCO~Y#$gNV{#-!#8^O1y|62)cMerS>UlF(6R#86KAdSA(jtJ3A( zCpzQN!O^BKjA=9W5U@Rj-xq@(P#ptRrcJ0b-%8aMqPt& zEgKnl;ZiLz3Ld=SP5!FY2M217W1)WC?QS>n<&(J6@6r4sMS3{!%;mvS{{dP2`(APs zE6l_=GunwKw#e~`KV97D;x0f7UX7Y=`og5CTV^p?zW)4|q@kqlR}3>}UkWE){$2w| zD(;-Y@>Y}RS$Fecu8mE@FRFk^z-^mkO|88N57PbXBK}Mb4F&|(7%lyDKat#iDD=I zi82D6b3rw$UxN2|#YCcqi2nAu>ZBz)8T=j=dk&H(p80Yhy9JOJaOZ(Db{GKSN3(_TIHK~ zy&}{Yf;j@WkSropYUr1B$oOd}(f+d5m$9SwkcOc3loO>8f3ju)(zqbMYV*R`mtpxt zbmM{kPc2hoG*G>vEX#c73b2|wVddU0yuDCb4sQ-A|3?hvU9k@amH$CT-t$XC(9L zaNeLy10=}f+mAovJJR5@IFEB;hUGL}r7d$AnIi|qFG|zS3M4@3Y<;O*E01I5k+2D; zWab}Bb_)CX6Z*09?b-V(Us!J|z8*o3Es-{`{zB7 zXE{q+wqx@bLwWQ}lM~DJhyrM)Q`uZ|5Z0MXZ@t-_ebHWm_4<~s?N1re1A+7F+_&;I z&G9VUK>k4oC@W9y;>p8%pZ_5u*;Ug9hNxZn@p#H{gvxN4FJt-rKCv^4ciJMGRhk$r z)DWYkXFc>urb!*rsYPcj1S-kR&AgPr81-x0KsoNfmK0Y(E5(HMwt{eezSa$C_<(6* zOU^Nfrv;#HqslpXEYcDBA@pZj`^uGS++2Q~%Le_K9U`oA;4XO#`ryH-OgaVw(N*N_ zOxBTsWU&=kpwdTk36c`LL!<6*T*;*w^u!1f8Ci^3$zS&XQ8yE8ztyEfY5jAEf<3_VLa+xcRODTSdJCF78BB zK^~QJY{=hIMBkW=m)VfT*P6lEMmdb90lZDhNHYGw*)?gY3#5WXm>xki`WVH%QCvm~@szf8MF= z@&)bQem~(zVu^?wB_OYmUI*Qv{6D#o+9x{92IdKM%Abq$ytv5n_BzNt_t==xd2Zzw 
zqZzq;;3+r8?NI$NVNE!I@7+*`4C{`;m%Yj<(AVHK-@|mO=6~q)?>=r+7A+YG@jgDb zE_p4sa*<8+_9_}hTo8A-nGzYk*S)9qFjIP}^xK@`M^5637vEikEiWXF2kAjCW1%Qd z;Y-a!2c!b+>^iCIhc0}d-2vKR~C>Nc5zDZEp?9awuf+pu4?<-WPs&nn&@4aP0? z_dS0(7FRNPwUmHC3VnVLpM#WS(c+Ube$5K9EICnTcLuc)ndUVIA0?lRHye_H(wDWa zWh8Y(-|>)4=*QHx1pexM5c6}lp3bnz(uotruu~}kQa^W*8BNPC56RF|EB)>&RXl3q z;>xQNOO&MM$Dzpk1x*rG#f0%dY-seh9zZkYP$9pY;MgUCArjKmoj zgzHKyPmgW{7Vt>b8)8{Y5V-YOz7_rDo4Q|JuzZfDb5$FDO%y@cx(s2B;QGDirol=~IR7pU<$#>%_`Fl&x zDqESdmwa~HCHx85<8goq2G@M&sKhLv=7wC-dDCy1x6GlhS5x>rPk~$>X-P*v1^Oj& z&heeB2eZ$H5vwh`sNI}p$v1Cx6C0O{e^-C2a>DW*9bJMNP`~ z=d+G(z(Gsy8gvH%vHo=fPnM)!IO~n-52N`b`s0&1+o~m zwx0hKG`pAJ(QLX%qvz4$x0IpyeQ;S3Z0BHd>M&j>ty=$l#;(|t)gQ)bS=!{s#eFY= zVP5$>9;0M5-I{ZdGG4*I$T#vT!4ci9-iQp1ZR@+ZdUJ{t!{NPjBAz2Gms~%GKW6i^Dniz40rJTj?1{V zFDHzfLS=#eDeX{>TY=W{v8OaMivYRVieiSD9uhc7T3+by;FaXz6Dndy^zexu@sKan zv{1ihw!wE#yAT2PQgp|vFOOmP$24Iu@cdcEkAoZgn^U(v7&adENkHtmEc#V?mK=VO zDzn6-lP@3h4F+jEH%?3B8`HP0DMDh2f~eM+uk@n98> z568PY`q9tG)3z?$JGoC!feI4wGMB5YZVv8g0dM8ztbW@r(fy7}2{Wb*@@QG;yXn!h zxOb6tGiKb4Qx}ku?~!^P&#dr8Xw6J!;~1je7qRG`5R^oD4Xl(VFPv>UJ+HjH z0Pxk3?z|D`4)8?7hfDiC%o?mU*Wwz1R!ZC>>b?!jJO7fI z%DLNH9sq5I&Y7eD=?lK1SO|b(YKj+yN3$A?92aG{qLb+k4UVNx;c&OyG`+d zP;8=BVzlLZ8L5P!oV5@ZuIvcvdgz^@6}aA@7Rh3U|IRB@`o%wydvlu5fZOE;t9o}! zy_O~+78$tc+bf;L}{k8eu<|G#hglGg2q>g+8)=qqt_35V_1bm=V1>gQa&*AI+}cIEkl>~3)4Px3pJ&)Za@zV_xjFb{6do_dL|1>8hLO8{9=-T(Jz#+gq`KqR6d@&-0DM@qSR(3?bk7sa! zJmIYQpu0K+jcNZv_1xfLW3#XlXP@6Ln*)OAiwv}%Q3(hc} z8Se?Vs<1Rw?()p41~ZO3s>)RsQ`Q`y(l-{(tH6(K7BX0!TJSPrWUjnHT~%hK5s-8Q6wg`(3cM7C#vF^!!-8*X8=81)n^y(B^*=)Ht5pwF z)#5$P44|{mlEvKw*fMPZ$FLOh6Jr;V2&?%L{2eATb)NTFmdf8EscSw}BvMwsaExS0Cu~s#79OOBh#lOZ+JtFyxKiEKx}n9AqCeX4ciN-z78*8i*)?+q{M= zA~Z2W!ie4&XpzLe>Dk*tzVV?v=kN5c4Uh7iHR^iDU($N@HoP$us<+x|>r73wFfp0J zb}ZQNe(^)tiOp0W z8qRehq_#v|{n!|;npCjv!mv<4d#|T$Q#3%1y})E!!p-M|VOZ?vm`IH7d}n|DXb1)9=sb(-om32M7Yb?08p&2&MkV&ScmX=cCk$9w%nL}CjTDj zyOwpZ-a)JOBlR^Xt9dhGFRX$;z5mc56@2o!CiD-oOsyiGJy+@XA_7-p*@WsMGjtk` z-d$&LQKfp25$!xL%ROXa5Z(a+8Abd-#>t-)&R4e!sb!B)!OAy0cr7DP&ctS_^GnMVP!P`YDh_4ll3hxX>%)vsQl- zgg_hp``asF$H!9w?8kv$+EbnArj9hU zgutEvPX6HdiOrIFEy-OTRM1`_7sAezA%SN!EmGIW?0vWm$FJYND7Q@oyOxYaO0)*H zUmbq>F39l4>-Ai4iV?B{bq?~?!DZa262bx$#sYGE$?H^4Q7KS{G|4L9Dr2)y^Bd3H z;z1YSMypqbg=23Z7prm)XkBC&?g>$gGGs<3k;-5{81cuV6Y$DEG)al*w~^1)#b`rY z2tHS56;3$9%q7Jgc(%D(a5JeA5QHF?rL?yIh0m&lwXRjM3vhA}|Fap5-7YSk+I;tX za*8tk@#>6g2B0gYwjW(_I}Fd(HlSdrERhtyXt>uXJZgX7d!_ci7#m=y&Ml)AnIamP zi6|C&Ru;K~o$-wFM(H$sQ(*S&Gag8pJ9c$j@OHX?zThYP?==bktlIy_XM2`*H3Vu? 
z&o16j<9oRfSiWXiat^>&I3iJ$|6o7@2?)x75C34n6a%Hh@TS*RD(MUI z2{)yX5&#bI!%BfY?*RV;Uv_e>>+<6Z1`QKdW@I(PWnO`VXB9$Q#a)SZH;hf>XBEPR z10vX*oC83%O#o*j=QXvkMiwD8`pI|O!VVBg;nxXv8V%)rZY7=m$wD%VE)rAW2c_|X zv+5}`gux||w_?--!OjC*D|2~#H?Yy6u?z1VY-7qgc+OKjG>mTahCp@QZZDuqsFTib zIAW@%QN#O73B3Vj0SHF5B>4NPn}*R<#yAIS-IqU353Lh$x8OR=^vr6Mhz**F5|E2c z4*?|N1QjD#`cz-jyAehG>4@-b~po zP||ns)TMgyI&PkVum^-gi9D^5exTiHZ?0`Tk?eXmMga6DJ*Ood379ANueQR(UOP#g zwU70JUl+3M1dzGoe$l=zOL(>Q&h&T3~0^rD}Nrxc$Q#9#m6O68doCId!i zmZm)h6c#2==)7{4_j(Oono2ncP+m!FM(6jZvS0HzXd6upXZB{U)H$KZ-iG56B@IjW z>nbI`{94kmN8Z!Zy0!CG+U)uCn`WYJy|tzE*UK_%DdAPQaS?i>S{6JCIh(99mR)=T zXq=EcF=_>rJ?J=V`EjhRMe1BymeW1a#=qNZCY50L9?DTLwM?E7H>MOT<%{haHH-7S` z6N+~tU8JrMbQhKqye2$>G|VzQCZhUmS-%)@0mb1?Jf+X7#ixQl_3$WWMuph6-fUt1XKU4`2XAI5cYudAv~8_DZCizYB5y+Hb4T0 zhDP)0?q*-#ap60L_?U4|0Le2-=X{kr+r`Jf9z1Q>eGy)_(^7IvLzyd{T(d}^C?&h3 z?J~K<{WK=r&$3kE!W}#w6j?L;U_(kA#VhGPyHrSUH^Xfi&p+>*Mng(H^#Uvr0#Ys2 zx^0X;f`)z>?o_=90c)$&bb{aMWcf&Fx$!d8=D+|13ZN%3%KYYenW)Q5@M(Yec}?Ms zgTkQP#BZ>(EvpmMPM8z|Y1ao$U7W?q>!Z#vdUAQ#pm;YI>=yi*X6rvOKN4e(P&J3g~ zoEnd>Rh>3V1au$D=mV(0MWiB7R1^WKkLq^^HektX`Wx+=l$*xg63Eexpn}2aD?Ly> zOPbb*bsl^cc4fCj zYMO$>G~BsR`&eCo@(uTl`^W+*X6BtDQgxe^tlf#z7mM0}Bp0a@g?C#&j?Qv5L!j>X zTlDCJNQa?EWIMGmSPrP1+{P?nm`025Qn|2H^XavJYyaAXUT&J-hRS4&R(m z4!=s>JZ|-RNjX0*7tVxMQ|uI0lsL$ymQ8Fh3e)(6b$NykRy1ubh2S>m$(3X z6+J|`B0!#vQa}8&4D+Yh;p$-c`cx_>qBI&r!o7Uyc>h1^k?d-O+l()%tE2`!^tjf- zXteDxEMgA91H)joJ_&GfOGP4k*&RtMD5(!ii*=BucC$q2O{;z1(OWnDy(E!N5KwQJ3tb{dq)i6ma-^0uv9;;=s=FaFGBzNoc#K_X{B^`@z%{|D?hH)?>1(>fO9Grry^Ljd%Ri z=d-fQQOCRd%K{BmoEZUBkZ(0(vB4AlAg+^v7J%IT?F9bAM#5I}V-ewbdzHET*=g8v z?1humYn5}ZD^R-#`?`7Oq5NVC5GU><@=sqvf=4F#l61Sw$Z{z;nr5a$1K846Y5cVs#%wt0-R&}379xVtEPz#+YcKi{>XuEv$>-r?@1YPlBr)VL zX2D%}=mD;FkK6m^r2kpCTFJiER?NItEu*(N_o2K3oK?t_=%b@bB8og1v^&izoGUK% z=#B5j={jf7VoU5-)7Ng#=;dqFC9N3R^;jf1!ZfY(-L=;4kb1wlp>a`BO_D0&l#sbU z$f$lH=f?IywSnJ_m(vyEB4y4W_T*TBb(KHXZ9g%Nv8GI_^l6}F5uF*^`U5)!h07hK*& zff0EG-l9q&w9Wuyk8y02q@;N@jDej5fMMa4iKGDpK0hJxRJL=2d&#y|4l$LAc?-x+j zp1?5EYY!2^_LP=xv7qv3dbb3BjJ)bn?$uPIHI?96R>ul9!P)nUX%7C;g+3zk@4^qmRh0Ijo%hv`%jeV)A% zRAP5DChnprAV_N50mB-)f1_cZ`0FIJQ!J)c*5-$iLDdmuVcCR^Etg&6lIhsPBasPA zgF0%aKw-g>-bMye?~`RozHI4`N@W=`t2=a5f2+G%pXL=kk+kL_<>$wW44vx@V<)Sg zhth!b%5sM_t4J2r#C+yNo_akVVd^IA#}|4G*C#UDM$=eGweil;wgPo3-J5A&u4H{a zUF0kMxy@{P*K0D>b=%Uq09phrF(`z@wagLFPK@*t#32+Ac6 zVV3o_$=Afcs)<_us7d}^%k&J_n#*V%0yrojeqMwr7D!1x?RQy+BUyoQt3TJDYB2D2 zf8(^A81-n3mtgJpO*2~kT9ms$6BpM?Y5@t>zFzuQoB2(pT-*pd7oOnka50cCH!$GU z+pnp?%WxBL>vOfooOa4kWAYD6gGIM9`P!LlCBJipMj=en{~$A`HFYqslsxz;K_@Hf&-y_;)DOqq0ERDCumcLsSfLpIc1yPOs56ElXx$4FQ0(eWt zpTx^gN~c#E`O!0p26&fXbX!6Yi338T4~Ae?$($2X;~?ouS3>|lh;beE^kY=)2!OA;w13e{ zCJuhN_ZBxc+-TWtWB>=L2XP2*Qh~HZmo(JY)+K?7PkMYtMMj>7_Lcc8Me!yb>Bxd{ zS@N&96Q|2mWImxUbszp}3;(S-Pey&ut8E--9!MD+*YexCb@&Ef2-dSP%~&p+ z-ZVVT7%O1t=J-%d*Aryn*rFBvehSe0eDDAnqQU#S&o*Ny`2Sn0=@Y?G>FX^|?TpNV1~TVg4DrO&b`r}aEjhWPzu_QHqMZW||fj5d1P!ZLc$B)Cghu6Q!V zQpVA}%OZ+!K`y@&3AH8WY#^+A(P=<~FWps9J;5(b9W>(R%uq%~O#7<1hx@G2BqAw`DU;}wJI?}{$lt5x=GmmQ~^Q|a%t-7yW z*7S834Q%f%BFcuT5TR5i{p}M zSAQ|CN{tO(7;eJZpT&d5A3Rl11@ZZtv)dzWoPc$Pe~_IltzAj`4^znWqy%|8l-#gg zxwALg+O@;!B>jmQIFD}D2W&WShY}xj+BmkPnL$c-HZs$!re3aQwuf6tD=^4~&Bs(Z zTyJ4lwW~Zvr<}w^^YW{iAE~h6KMm+~hG#}Mv8ZHq<_-~3KcOz>p~l@n(dI4+Zlmwn zMlsLk+~RxDm;(UZkdyP-*p3D7&0TrhuJUp@+QGfZnwR#C=`ptEs_V^+~!DqZxu^{nK z@;3*>W3kTA=VN@F4Zp4>2ZaB%=n^Dqt@nGki6(^qTrge>YC%QK+=9k4Oy)Ba*0l%hOdb6DC^@PqGN@Rl?V zKZWqz!QR;%aL|yNTr!){2%)!@)YtZ+JSmXdhRm|ce8Ck0#}@t|vzFL?a8vV8rzIA( zTMh@{AA=9>jk}HCOZ?7S=9c)Ktm^?yb5cLT!CD5hX(OvNFrm6MSND$bHaw|mD6ySf 
z5o@GAjSpC#mp+sUqu&)4>o#SLy@Q^0LNOq4B$#GZZ5&VXT~#iTteXsEU9M7x6!{(S zCOKp2A+v_Ypc2XkdflEp)KnoUems$cl1l1FJ-@xY5x;haS6^R}ov_6@XEGyI4CqSl zw6Qy$T>Ep!8gjHcDt`D)K2Epxlg?f>*qwJEC*1LU;q&0QzW8ZZn9Lhb<57irX}J_Q zwU@*MtjaV8t_h16HWe3M4vEp23R@f7rWp53=rD!oEbFK0&P-=O5*(`q{XJ1{hgz`~ zQKVS$!P52qLfG?gO`}^UE+gDt)Wr)rEq{=eJY)a-AG`LyIXjSOg|LnA!&c-?AdJ#D z8hIAA8|YQsR0?>!9n}QNU|)0)ub3Sk{w0qwAEgQm7hfaJE#n#kgOmD|h=dur1THBv ztFIbQJ{@o(a(hpCax^an!7?8lW*4tUyO_!fF$@bhSeQceh2q-gOf662nz*;LCa$~Q z>@Tmk0;fonJj;F+Y|SzvNH_sVoP#{cwKn!jBdhLbN*zd1-*F|Fv)>@Y-ln4(!;_3apo7Zgw>-g!1BtsH( zTad3eFB9bI%bkM(d~F0lUsP32G;H^84~$V#iMj)E*KcA>RT5Myp2apI+Mb5?8SWLF z(j1?X1>OJO+Q0gJTUU85BcP#qjxbjAC*}C5HsPZ1n8XoCp-&?poZxihT2n*2C>mOK zO)T}n^T~{&B=51;3pNx7P~>x@)@(DQSJqY+0{LfTLC3XC_+o}TbpkKP%dR#-bK5bf=698xzexg zXvETNLSg+VLTGuNv8GF)>gY&`H56UFJNMD4>J$>h#FT|rDmFpsYo(4Cf6}!^hMa*nub+H3tUtn z*&XsTn>?=}N>63F9atiZf4XB=JnU!R-?NkxIYFjn0aziTTpyqA8Ee%hr$HsSfpJkE zf*Sn{++FRhO#0QMywlxv{F8z(y=WxV*!CrX@XK)S)+J47Xf>=;W_H$Gvg6vphbs!nE+(p>ZQnb;g3( zx#_ZQ9(v0*TwTS2(sY*>4TZL}g_7BclMpqS??)@94QTRYu4J%RJx!8|Wt@X4#?DHr z@S?}?St-t){horK*3EI>cp`Bp)bOwXUB{g0Uid)7>pS{-&bxHNg9KN}1;( zHPDN-EPK47G{vif^u_Ho*r-|(5N;qt#-1fLD&^u5#$bJWGV->(J115-$URKK&)fCn|)} ztJ?#OQQ@q~ef`&#SG%DZzwI zjJV%Y<0{W_ge5bF`nM(nW`mpLrvA}t9_+!yV$_rwCmlQ+PVGxSZz&YOlX^ccDjqjg z0F6r-AH*kfv=s6rmyYH$u3!DSlFSzExezOWdN4>Za+g^Kk#eOtDH0I{w@B8F1o>p6 zwFUz(tB|MRQ!{mGD@wDKZJ+87I0OkO+bBX@;6v#Hy$qMB2yH*pR~DW1-KJHc=KjG2 z1s(H469z>xnm>#)@|xEI3V^2;kM;^Bdo)cb*%BUgg*am2U)v24TL3Ze;SL^JDBrX~ zI{RtgFdeu(8%pG=?`0eUO*K?%`MHX!OSAI7Kp{9JQUQUmU~a$!^KNSJC-J3PZyl`<$!#SEmsq(PO_Devz^6W{^&&my<=DeSp|A(#-iG@F1< z1deFWt@mIVg`smpaimp0>I!q&qbp@tH8xcAr|%Wg;Cv=3zD;EgaU4x+L}ctpti&VV z6*mAt+%s{6(I?D7G?L8d6)iZhVGKl}3b6-k_FQV51c{DQ=irc6Sj&+F zI~9eugbP}<czb>&O=Cdj59hA2CxyQtBDXZ&&z0u+)e~_`sPTEF`3=3GU&LCsDa#WZJ!0pe*_?Zel7(G zEGWkLKP%w>UNPCt*_vx&s*5#N+^y|jFY;V%cWd7GS)%uy& zO1eF7KlNpOQ~>C417#b!N}z=hPV*R2$wDvYaJ- zj=`i}yuBN^c4ztWWd^SvPKyrL_syBLsITRR?w0mRi_A|pecK)tlv?~bXUWRicYE2l zz0uB`VRCGl=*&x*drnBmZdZ~~?^`(YWN7!Y%=enxmDD(Q0PiPS?Y*~VaiIg-^pu>; zEo;(^Iv2M3RYfYD=1-k;Y|5pKt7*GSmL$9}J@x8Kt@BL@;Ypx5fJ6zhW zk-N$&W0rC6vE%-#M_*?P%P#I&;;mx0Wx>g<&WTyx>yE2^i!9bTb5f@J&Id;2NBRoK zHqK#|Kk@F}YpoRzo>u&R?>|*_o07pnzwCX>MN(Q8=kA(0k;gn>^Q*IoTi0)TxLD-O8y02f)cQ&1QG0EnOsJWch{amk{xrL82Q|_Caoh_CW%t=meV4L`&ZC>WHaw}h3 zu~nHRKgD-kxgipMy=nsC0?%{%InOTbvx9g z-|XzBjPqNQ+rqCsSikg{*!@4tjyw}hdu7?NbHa9EnNHUWC-=?e$=ktj^-Sqo?)2bw zp{x5YC`VqMc0$Xk)Gd-}!MFLJDxKB#hpZA$Jbljhx4Wn9dcQ0FTl{9R{B>QhY0uSO zqt;bEk^3r5Q{yC6HivIBo&53nX>DP8nyX??FZpYI(x%XPuZ7Bp;nOdD%6j%DpzisBCh-l`x{u> zdffjT{wH^GxjTbDO9ue@L4Oke{}e=RXYXN4PV|lZ_jD&$PA1llj5FW;9e?l>xA`4^ z@e_B4!&J#>GRe5W-CuB-q^lDL!lOYGg>h z-zNVr0z3dPfCfN?jQ^LyAMf*CHUJ>I4FH^s`6J9G833pV0st{SP2LQ@H z-nVwQ{*9PZUue0&oS`0z}A|IN&-!3?O|x3Qz@{Jn<8LUQd$Y6xFGpaE6MCl8X8a4Gr}fYHFIZ z^k->k&(Ts-(=pJUqd$M1;XKV*MkdDdOl180&q64E<~(`o3_0U@T54Ky=)Vca?*I&E zPB2j zd_ZZKkxd-tO`oCX(`T{h1Z?U1eo2R%fvZv_*&|rZydICoc@cMKT0%GBqeKZ%eGs!6YM=|RBXK%jKF zW9Qp+UL_lKi^d+Dl3g=iOH%Hu1u#U>MOIFQ;P^&r5si4i^NIW2(i&aXpz^s7uro2G zyq_}vgSNllD0<~>;orrt)@yXU=$o*7r2qrpJqCO~Sxd?0q~7&|wG%T(!;0`u)+eZ; z3UtfHb3e$_-@RW^)|jLqKcCV?WB|?0@P+x$5X&NodLXT+Dp*?uvCbeqK^dcgDdMG8 z@wC^ITuSe?L&J!YdDD;5B`IVn041*Bt9OBRzLZcOc}gl5xfH(}n5*azYwi|WHwHlj z0qj4mDAiLY@-*P@g#LkWpI?*jsLCxs%1I!!ogC1nmdfCI^B=fsb(B^enA|nwE$QbEi<(j;5)18NGtW40~om6#mK{gREC9p z8COt>M}<>1aWprm2z%|H=)Y+0Qjuz?o0%Ad>BA(Wz#3DkF@W5hN#|5eY@!lzQ$aF< z@|3d}d&-Hr&mrgcb{t9K@jBA+2WlV#)+K}fhlODuc8%7H%%AuhyN)5zt3uk)ncLSi z!>r2eN$JpOZbu+?2k%xbkTekgOi{~XT)jwatNoDUjWj_QJAjDe4-%8yi*Qsb!3Zgc zGmg9!e8U}dbeHkOO@~itLh7r<>OA9=bpAzermxZis@7)-q51Y^UTzL{IgtF&kMP(< 
zeA$@CKeg$nIIk$0XlwX{*>vVg4#CN#SaOh_f?=WV29h{ojVZW)LaDX9AX;`%SZ9~Y z@P!WVnQV)AjPdHoQ~vjE^V8TFAcWT~$+!d8tEPcBcR?z>61>iS{hx?qF*2!XQY=1t zb+b579h`6sI8RhJLaeRpIUED>qSvez@_PfPC{U08DI;0J7tkH{dQ@yaq2Cu)X3fXH z{yfg65GZ^87*JYvo;CkZascp^cb0>39mjz90+Wl!fI8#E;=?AqP!XH&mu3IJjcB+z zIZ#RE?e3q|h%Y#t5L~&rwSG8y$if+T#Epv7s*|mrERD;^XY(=?HQ-h*133+bnY%(N z2Ur$Y{nqKcTjb{mpjrVHsJiVA&E z!EFbHEcnB+c|}k1h)w)6{U`7Zfu8282VYc=+I!U2wrX9*{E$XpQ>&d~7UBh5_3|OE z5~XyZt9kBYvt_w1$EGW)|t;&>05;YT@5 z4J4_E4-xYeRxV5;oQuNARI1o2_!3VVEc3ms)mV@A#+Zj|T1(;-X}?dW@dSP|vBm0H zvdPk`rJef^J8X;JcNT2xdHKmAGXHv9-^HpYCH!jB!M#%q1txqfnRZ5IuTGSx_d$Je z=i`RUbH-Nb=r0Ruf#?m9(*{e6^!?W_rv9ld#YNf zd@9T=^G!8?2GH3RGao$v zu~k5(It;%kb&k)Q?eI5mGn^GID8*F!&s(}7a9SpYH)BFWF_}Wea?W1g}-!*v?ga`bmadKm7Q%h-r%uA8jc%WK*-zWc&5@28XxQm({M)0a`Pit%E zi|ED|$Uw^5S{LrOq>Af8@%rMfCSLM0zJ|fNl@;2HqO+GSh`&VAT?ro;@W=S202)cogyokHF2u zC(TOmu)YSoH%%g?#|ng17W$znQE9G(pqmU+rqUbq>Sd+ts$ojuDxvXl>pbkePzP@w zn~3Zu=?iCbd1j4~iQ^;vkX|Xoqs22}mt6U(1$X$~Uedh3-#A=5QDf4#=+Y=ZV&S5O zxfTWy|0aI(Bza>2AI>3K?CK;1F@P2ZItYeKCe)>qS#6S`0*>jSPzV~WcTw?>RbT<_URI@`$p}=6Unw{*g z!^1Rw$wfZ;grZRF3CRlvoLXYjU?fPg0|{b$&do=bf1W%%Js}uMqj^x&+tcF3Bt_E7 zmtT7h@z#G9M^(B``(X0m7!X{Pre$6s9?n^;Y-WHFWgOQSw$FQeX)S)$FA7T|F3QL& zs);`aR2>7fTE>LF>@2Xcq;R$t^RBx;HEmFD@xj0UDv5E?{+Whwzi zmcQ_t{+E793;*kJ7fl+E_(J0X$_o0PF({g+TfTN0c&WqIK9hWZ)V}lpZ+f2i?)0k+ z{U^rHxbn18x$NXzunQZ!l}Q|4L{=Pv`KP3R@#00aDutH*VfzQUt_wTyn$H~VXtei( z&oXb>B*B9>sb6a6y)Vb`+lsjNnL!4}4IEts0C#0!`@?Ql3qis=#y_V}=K39TlRa+e zVM598v-SH48B%Pt!2JEsyOHOHu}ooiD~yHCeu(ZgY0?lQ0TrTEdx!&D6loN}LB6IW zc}0=Q-0^s%Cw^+wHAyaPRgTL&Z74DEBGAQ?l(ov2yqvY5I8aR#siMSVS+T5H)Q4j@ zF-t9is5*xps?R|qD>il^fY0vaAmBzDuHOXa9u@xFUJn`H<}N57_wXzZ9AD`;R|vER`WI>aSB>v&z(t1<6oKIdVfU&? zyg%NR+$6$FQ2+4w`frPja0@wG*4x{7{|euk8spCeq)4FN6L?{B7{4jbjHw;jgia%Y zAs~!zhTeI`*B9{EAt(I_Z$;EB)^OZL-oAPa7g>h6?#g7h6b1%sN+n;3_m^R3i&jnm z0Jt1k2K(T{erU#G zQ1T%*H^O*8Lcuc1!8iF`)ZHTj4}O| zp!DJSnm|-GE!@cD?q{jUw`wAvNAe$o^naEMpkT(%zA!bf1x}BMXc19t+9ymqvYHB; zy|XUH<{sI37LI|(wJcx=6yp2igvU){UL$>ZJV_k0SVgHe`bIT}Iq5w3z98^M@0Dmn z#F^HCbSVfs`+|5TK>n(3koNOKC{`>qmZv-~!+)2$e8dHaPs9qVKs7@}j1o2I;%M~egFT(OWb zwYyo1#fB7G6CKTY(k}*O&8RT;)yN^JV6|I&zDsf~Wc-`i)^I0(wTH~TuWKw`jD(~O z5ATHNOY^ASsIeqdDAul;h~u|I<20>ZnemVAVxatH{91`vY}qCn1^sB9b1%!(+ehcP@-%mQ78l&6fRJ4B)8Rv)e9s!Sgsg)mRfC)@K2`S;dKZgu z-{E_ajFK;V(~N5pM|H3C1aZxx?9}d79_cfyap;g@mIwT74%f+x*lBl+N~yoZ4IDWk=zU{(kz_an+nMH}tQru$eT^JiB)@%W3` zSJaz8bXGun&)X)7cNG&P@?HU`a+r|G_8niDxStNYnMTXBZ=^-u6a4!4A8UFVg>(Ep zMoF=8F^X-wx7Ij%Edv%f?GL|pdBl$BSiRff0(Xs17^ir$g@%oNk7l;q{v@Nm_`*g0 z&>`u~>j6~!?LMsfr@F;i`6)WbUW=&_`>gGC3c?R@qBf=$-zE?~iEw!5catTC~>XX=N2mKWaw*f)8}!QomSNZ9DpS2gcp zEGSGO&hQ~t#kptZ1Hu@_jBs>C_XIJ1tix+=4}`j75rbV+S?~h=)piScFSo6BTw54j zxkcIrOj9eGzz1Gx^P8UAuwpwsY_wmEJ z`(^nFey&pjO>A#=yL?kB80DSkzOLP4eHYdz#wAd(w&-(s`of6kXm+3k(Ri}gRzgYR zdh6JnrV4e_nmDf_+;;)-c(97iYZ__p)#mL~nx2@NeV90|NGwi}R%PBvvlfxo$Vs^t z+WI0Iw{@{_r*xe4`SzmF0!A%wEYTi@nGS2;)(Y1#nhNVenG2w^t`A743{_dgYPtxT zUQNn2lXTHy#S&QEg<=VKLkY8(gEELKC(=u_7OmBdZY2T>L&Km$MhpyGIrO(0Z@(Hy zGR<(ZrG-ZGPeJ2u`(~&|UTO~$7(fZqsGOPLmCFAZP@NJzd!6IGXV0AB>P>fyJs4GxYArB_NNS^hh6hM;m*7q|+Z7p<3dyr+w8ywh`g% z-BT6EmAIaCtzO}ypss9S0#xE4=Tm|4Cs+>1c=j6U9OGi|i^+-tF;)ohbyWIbyrwb- zZMB~v&XariRz*H5mIGMjq`$a@(xz&A86&tJ%XP!MW0z3=lrOV7LxtxG`%gIm;28gP zGPwB5ECINhCg{IXS-ho^urF*w)^wgVlNxg@<`)t(_Lb|fg*T$rr(9ZJ z=-dc;z<#Y`8mMNx@LS2hTry=@_eTmp&*WQ`mVvGn#4c23H{d5oSadrlIMj_XkXFfN zzhtV+s@`pgP5{D&MZ9_rwm&_|KbUH7%|VINHx<$0;d2xP-DGhkvSY4wY`F6FP__#d z0qf`C(eo!7k4Ug4yl?>bf>HIFKt6snSJ4o!zr-Ao$VWwM;!vf91Lre)GJGd=ca#7( z@*kT5#Ec33_a+8syvV{;x}lrz93utC!;m==E+BJiCla9vl 
zw->bY(qI@Covo{)y#*0y1@-)IqyqkN&OgbXSkO~k&%M|S+NCjn^iVn95U*G27h}Zn ztmNX#NYd942S(#k(Q|zhbzVGxa`Ae`JUgKmob?fAcr%$G!31!a8Acm!N^_@^)auz9 zDJoUjr8b)Hb}C0=v=~-o-h2^O{zmnYnpFo>PX%U;$OT(%;ZlwPqXW({fWfzZxdB-?x*G*XntYy{=N@63c9}JS$ZwsqI1I@MlaZsStto=n}pTq_y~GRpcx?BV3Iz z3bSL)sLNxy{6gf`A#`Bpj^Buz!S@d{^aBJv2fD~7?5+B?4c<I0pVh=5Lx#fG~K z$v$B>Vgsf-w89j|^+x*WMo_(sgU0}B!@g{!yCFeTmB&YQ;H~ebWxB=!)vY(5TBX&! zn-a|MeY``9U;8uCpT5YiP=0uzO1W6J_k*xX49G)84wKgnSdKad3$-H zP7=6K*~XcAMUDf0cNAE@sRhA0BJ{G+NC{rv1v}dTfK;CeeHFM>)Z_Zmy?V#8Q^`9h zTX$rC91F*m%V~H7x2DP?#`tS!^&T{uhkNeV0rl?t=U1E?O^S7NwtE?#>s9CRb7QIzR>6e~Duo#hd~*}W<~ ze3Ju9p*kxL=%BwadW8PRVjE=GJf9kPEpw8J&oekRN-Ve~cLWnk9uiuMWt z-gO3{-HOsFOnb&b7wvJ(LhkWa zvEQ$M^1qyZj_*j7Qyd^PrvBXV(*&mg>g?^sJ4vmf?>SBw8285)c9lw0HiK3@7lFzUz8- z0<;5tjsZI0iG51^O}UNJyQ;(GviTuMqw6~DbE@UmCWWDiTc({win{hH{(|j(gmxGV zQ?{u;P^S|oxryKb#wKHKf&pGF@E{E&Y{aFai(2OVNS7mYYYAcy%Mlv=t~0_aruOum zKj{1$_78^{Oxn-4zj8Dcw>8Hux{H(xeysu9kNUFM5ND0%r9DG1LMP9{t{5_L$Ww!yz1q&BOJ(Q~+)as1 zoM7zfg-?I0`qw+Y(qMmBOmD%9cgMI{Ph$Rs+hc((zi0tCwqIa7{Eer3z6^ECr_jyF z$}$m^R9@*(iF*#K^tENnS*|eDE6U9YiM9tRpPIeZZPZ}|8`CZRf+8YaUl@mK?9NWW zLrhGbVY>uOv$J|jLb4FedHs*7#n@a%T|bqtjFzt8W%RIyJzBBcNLU$3EgmZf;YcJ3 zLZJF+DM2U{26j*rpTf2pj8<+Qfyz+8&OGH@bGESSfedZ0O?1O94p`EBGBQcit*2FC zwt&T5(P%Y2nsUZVN1e)1&XXfOt-VLtms=UF(tSfy0#c_{L?Q@-JYg_jOwFU5sf>XM zp}mg${WCwc{2S5XfrbwXmPK+_uKa>?qaM=~v?~c7>P!_ER!6@M0$EKCfyT^rdSo%_ z&)4t9dvYk04S=sj>(A7$FD2TG9#E2+N6qp>=_%)ht;%beylMHswKl}y%sjQs6%zTu)ZZc@0@k#j?T!jQ+F^PXu$@> zWz5!2erIQJeo0n&|H6hFzu^A{OP>A8o}Ze1f$w?SGU4^5OG+Q;f5Cs#u>VrU|NGSq z9?V;>pMF&W>IxGnlnIZentGbwbfr{L9TonAT`-v%(i34MJ*8g?(lXQy>1mXCu1(Q+ zmHI(!p%b|I0lq@&M>Je0A;^yAg}siygIe6IUwB=Z7E*;;VE}S#VR`8=RSo=$^=K2B@&`79^ z<5BVwPfy~0Y^odd$8PMA$92~Gb5FyJe-=oXFdp~p4BzzhojEXGbgc5Xe5%A{uF0ZY!JUuXc*&%x*=#fQBe}irg zUD$(n%~gsf+Njw&pK0nHPcX{Jk^S7u6yDBvK5je?1#X)BKHLx~jtEr|Y*y?`%RAg? z4`a@dds;QQ_61_$EJ~(^v!si~>e`wc(K;90raf#fE?G2)o4^)s$mqJ2tfGn(b2sgb zM%of2a`oq7@tvW$MV&KGy<9zLzeLi#3I`u*cjTtux_yOvW0Q*~Z{(}x<_0!{V=Ph( zzKj2A!jsJeLlf}UdMQ7U_v68F%CDxx`<2Jd+dox*%2g(qC-ajq{Y$Y4;%BQ!p(_$X z@-Qt!0?3Jva4G=^#w5ow#4cw&P5>;1iCp_$*!>KM6T_4znLfm#d#0EXScrkA_)j3O zuqywMzyE+yQoIY9D2aZnv3Qfzt9~SK?v&E@3;(^FkfWmT6UXGF6h`KO{z^BF?tK+q z=Dd%{##bKgJ$iWXdBSQ?`U_d${0pE;I|g{7hO2~Mi%CJl?#+fjhk`z>RL8%tW@vHx z_EJ{wN_rF~HqHSBGps_ehKsL@6_=fLi!Z3$4K*E1k8ETCM_RRDwUC`vI;ulzdwA9y z*BeR*on6~FCj&Gs96jL6I=L4Ekn|9<_H$xyX)sZFV4V

    tvH87Csy4VkS0!W8B#A zERzvSe>_BxR>UkHk$}StRqiF`-5z>?>R`*2ZZ1!FgS5C^lyQHSX4@fyk=aO*=VNw9 z(3RBrI50A#c7R||8I}`h?U>yY@bWQzXWC-;#;Vnc$wclc-&Mg{hgI-+aj(FqDV9Ee zEcsklHxiOoMg)eG3)*eKb2KDBTKG^qT%XML0$-&|9Mmci9`YyZC3uh=nyU-vGG%mJ zwai;PwIBEQ@is$1V4YHI?A2O{ zEc%b{o%!^M%f8J@hnF$O026h#@0qga_Xovx7Y?uPRSVD_kQLxYZiidnH-EgXeOu`A z9u@vZ!q6#Rh(t&MRl`%>?3i{%8s|1Tk0pplh=F#pqtM^i%4Z&+skwx;y|NT%`6zQw zDA?I4aPsx`TD>l+w)dvl<9v+)Hn><)g)myo4Qk*7X?cDgFNLKlDYg ziNal!&RawPHAL-!#mSkKS>ZYfA&F|vDM2*n3Q^AC{C)M)N-}QR%hTC zz+IW%^n=(x!^2>8{RQRH=;gz+K|6HhjqK1I!{a(L+haghO#QpfGw&=3CC7lCR;xR| zZJ(?!(2~k;?y=LZ(Es(!k+u(#i9>-NKCXDhi=Oqub~j<7 zX7b~$-U*h?HCb}vVHUwNG|wGoYA$|`n(`*p+d`pzP)ITti525uNhS69^!?r^xQBi` zW0K}p>KOP6?S_K!3f>m=e1<;>)muTViqn%Ka(=~WdbsQ8$>#9F1{}D){=uQsFTp3a zU80YIRrY^V&iu;PhtKbb8k2Q13wENfoioWgpZ|{8XcF5e>zhhBw1wcZVuB0C8$mjG z6_jx|+Y^rgJ3UQB{W{nCF4&iqk0SZF1wqt;JUyWiGZ)|e9Ap0}P#OgvEmV#Eco#xG zzlnew-M+ULk^VOAOyDr$%)g#U{?j7=9{CSthZQN;+D>tg4u*n;YiY0Vv$02M<~)OYl9gk zhMWj;QqZ*wV+N8E4gFc+rGV%Kwii}{TTCcM&meS*1dH@QkAxBx{p3F@EiP#En?3+UXU`FY zHr$7d6)PEZ)B@bhQ2`li?n7CZ`C?#xmPfgWG7HK-NIXdKg`!VqKv7w)9X}98y(Gq! zf;T8~H#1<(E52(sKdyLY&*`gr7s6;}wy4nWK2J*M!BpkI;mxtxVihIG70gcEp*B%= zCMsm&n?wxYjOYKf|GzQ+&%z^02SeN%-(bhea)r0gtZ9Wo-EEP{+`%8!t4I0$2A?M< z;(5eHHP2rBukR=3vqE#_%WqJgBf$4M`C8en0to`^_T|el8Ip``dFVI^7l#C(nNik4 z_RW***BQ3VNmxr=#8OMe3Bmm6ZsAD1{=;s>7Viw2M$hT9mLSXX0_Ngw)Nw^Lz_8oC>D~&}&&+rq zr}zt{gVQUXJ#%7ug*koz2UZMP^&QyZ9#yU3TT;SfV}AizWJzbG(<;zuXuBDAygZ0RjFFBcHVd*m?qQ{NXFx%hae zw8Q~*-16|?k6k?#rIe0$r~KKZkS1}7)g`O%!cn}N;nRj&#{iGSx-&ocp2m&rZi^30 zqvbFAz595HX0IBkFU*qkt!w(6g;>8U2)GFMNPz2NKSyD74XO34CdZK$^m)Oh!;BX~ z8_-y#(QBnaveQul=y<*eLxbck9-bu!+cuSA{k8BLTEWAG?+g0ybinU%nQdyJ8=N;O z%csuvm1ShMc0Sgao7j~^*wgr3@csJo&;qI5stv_34cI73!E?-K%CkXwmfaN}t9==^ zh9)9X$_m;#EN<_7QI}%)(d35P3#{s}eEtZn3|aNtOGdf1U81l1a`y`0{DKLoSLn&@ z!neWqF1Rm7rMe+Sp*d;zY%h@mFCO=oP_l{cxn<~r{|%{G>D{XAegVTz4MUGwmX@Io zDmF%BrZbS4&M^-pGN62Syz2e?d!lnozfoNNiz2s0rM@T(PSa$6An~9fN!xQY0^)0Q zan^_l=QE9CivM6Z^xX$xqPjR)*a%SL|DPA|-@FIRS{cbQrvQz;rD`~hOdVa}#;IYz zJxG3QL>V@W7RbQ9ZAi7wz>u8GW^j$d?AxYL{tjjC1aC4DSjk5 z&7->dZRR-F;yjE>{6)TZ#6uPP&Gp`Om)q1(Nr>-M+SxTcU!se8xS+E(q0#d~)2WJe ztztdE_O;t(CKLNZCo^|~mws`JQ)~W!jdtqwaC7ne*AT%|@o1K;*lFvdFAZNwOFg~5bniq^u)Xfj_Nd@ z^OEZ`lzXO(Yf#QQ6?Z@EMi~&|a-%E-hIRVLFw>cg2n?Pvm)S=7ZEG1X=vZ_QOe1l1 zET@94{HcwHE`qAj3AotgSJlZc$FCBc9Mt_Iiqvbgy(?IGV=mxCcv0~KM)~pOu*Jl! 
zax9SFk+-Ld&%xq!IIDci0LhR|x5Q-upDnpB*jMTp`oZ_0Eb|U zaW}F}W4+HWfBP+hXP72x6~_iA%AdBe_+XQpseNv~g^kKk*URtzS7 zK^@rL@UPC$=c_X2HBDj3!Sy5aU=nQ0iMcA>7fn7VZtF?Tn5ksA4kxbagkP$0E4n6; z-(dq|c6IMUWC`&~@W-POVKh?0Hek}&_lgS>B)omwL)%o#I+JXiaIPM%kGDOJ8e$6K z?`t0zS;8dKsWE~&08zFz(3KIfJoaHX9OiTv5pVXzb@AHj3g>UBu)tP7AG z?ZcIVx+B>_X|J7;Tp)F|fbCL~jxk#a)@)aT+Ut84+%;kIe1&nGxC~u+4salUm*FB> zTtZwzs5AmCl(bqLv~tkjls+5g=qrk?j^#a%DM11O(SQ;m{VNkD=*;k}oV3e}G zWCym&=>c)V;_=msiB0?*1z+(-w?91}y{rFFlTWaX6J}0IVT?~n!X-1XJvn>HiFR@n zJ%SzfVZHhWRld!n=OHNJC}tIMS}jE@=0lI8lUj0;XGGS*$<4cAH+EC<#>>6Vjdrs9 z@O(K4lCIFKN-;D?k zMei()Fn-J!@@##trWr&=TDE<#uMZ_bHn4+;i}!SfC{(c8a{geRE3Wl$8gtAk@RyT9 zsXYv$vyd;<%`$(pmN&Kd#p+5NRuBpi762Sv1 z+*T5fzp$a*HHU1uRHSlILW;dGHj4{yVg-rtG`=VYE!J&{AKrBVrs%vE1l8TnEy)u) zOA;Th{U9mDIv@-KF&70*-MRJ+0ALvIcg@{kVNE|5!aj=JlQLL}gn-Q#`eP0lLqBMF z*I2l;;Y~dES~TEWK+Wff92(VRmCrc-Ep6iqnZ_3^YPx!sXx-Ii`(+%ahTbrt?pfsJ zx$=yqj|c#-{8=-83lkHyD93o`D)P)@%Epy!J%2cv5B9y>Nc*JodNQRZN;qXORFU~n zqoc*tP=|*dCrwA5ZlR@2-HB%>5_1jYjdU@c#x}Tq9>;XEaK6kz2etHOGvZ=CHQ)@% zjDH$MSHtnOXoeG&uKe;nBodxv9M!3jEY3T}J$o$%vw(Rur6`}&enYz1ykyPS&x>_w z#n3FyJlXYw4p{XX-t*I1qGfSZBE;)L&lfey&!)RPdY9s98oKN72t{Zn|!oPNfC2g}8{6ZfXIu8U=_@boOs{UG_b#E#zaA zKLwgM!4FEW*l>W9EILzD)WGc4OM51@1+-dp3|&B1<0;Smku~G$?=Q=m)IY=xcD`V7 zf|N|PMmQpZ?JRZ1?K6efR)I36{! zy({RB&Y6+Y)vkuUr)-&%>AOaQRW)sa`TM`DF{9vfZP4z4&N23MxJi zP7C=G4`wtYjx4+Cz-saa<>m9T1o&?KX*N}a;rgxhi``?fa=ZgpuI~+dt+sHbACULQPc_jmhoDskP5DZ zui<`*+uYXCwqX&+P>C5Fyc5n>IDmdPUFa8RnpB6{N+J&?hm|7pEdlfI{7%Flo3FfY zR4zcN{S%>~kwejL%flbtBwwwK0_ap5jMX8OcY8!=qos6X{-35F>4|R zZwhpb(73ou93yzGUuS&@0GuI$nM1^b-okViUKWboD!g9flbSWaIuq`e>)O>+?7%-T zQKP3jre38`2FIx2`SK7>B$SoQB4*?a=5LlAQP=P>PtiDnlUkcPaKSjHVmn z&`(ih!^Wt#L8FS8%J5P`YCUb-BC@ndaQr1!zWto42kwk&NC!;2OYmVID6m89K{&o* zw|_XJ>#*z4Rt>Oe{@&{G=O1Ywj|}GX?>9WV{RFsE7+K(wfV;Yu*3i?&Z+{?Ap@AkG z1GYVG&aDM5UT3kQX5$;p2)X7borTY^8Nq6pg_{ywQ|`QWAhK^uu_q@-JTfq6^j~F_ z*#OGyoS7nj?|=*xhHl^wz$un@4|SQCT%8b{i!M!920Mt~=_(`J#KnQ!WVg@&D%&?* z9+Uwm1Lr(${!k6esB#YM+gIIJ*JaJ8DflBs%?v=)D-&O{Z1z52iU#KnWxb^%wz~D- zNm9=RX+kQoT>WlCY|~RYY~M1)WIijW-b}a?XwY}d>dSAW08U!GCSViYExF%_U%D6W z9LJjFuAgOOFLo};85lhNFrEN$PSS*hokEH8z~X%$B!3UftDR6kaCR>~;zaFV_y?z; z6VHT#zJ|ZSCLm2VrKWuiibjA)SGB@-*_p`+9O_cnWP?5gvD=y1-+%%I)qL}EF{1Bp zITY8MoF$v{n+FmNf{uQtNosAPap#X7$K$`o4 zpw{#phF{_g7#0tCN5oj8zY{c%0m%}_fXra?->9WjPOzvec<9^IlGv?VJRHAAp!tID z4v1h(+1$7kx-E7V)9qrEi!rxZK69n`gG59qvi!%C5SfpG?m0 zvWq;k4XfVMHp_lJ*>2?PeJ%H64f0x_D>cMrsYxnQPTv8S*fqzHm0zBPh;_tziZW5$ zC71heh;<<~9?}Ka#dg+1h)lN4>!uU0Kder;hUlB21)Gy=WLWE7rW*RLDk`|K&aCw+ z3z7$imIIq;s>0RLWY^)6_PIQmOLZNi6E%)V*F2UalD4cRV$C(x$J5&Sww>EE>Rf5Z zf^-J*+?4}ZS#0b<^GVAa3+`8^c10I&=zZ6K*YlV1!OT z(sM@!&V72jWVmy>O7iWdv(>iXJ^(((+k=uWqAn+dSDz2tdT{*<6v_&PTB3N*m@*`V zxk9$tt}NT!c+e_6eSH)nG0^uMt!0&zx$;{#ps<+lu$iqXc~e%P@^Tzpz*V7wf5Xs5 zedm>$5ZMf$_hr$N_NcY5(_)W_MiZ49uecJ|ELl~x_8{Y1WEj^U^Z+t{ZGZ_q{a8Zn z+gQUEyC8(Ia zHhCMYvB=yRX^5iIol9SSqY-e@@g5o9+Iv$Kct~MSa#ny<$h9ZvCUm3M#+TF84J75*pXi-LEP0Y%es(t&$pjD$!=2pCHhe z=*OsrlF*{z;n_;2E8XQ!I&z6P!rr{|>svk@)q4zJ!Cl_u58OK65hb9)p)5VZ_#9Kl zhbiOX{`s*T@_9RNvPiz{rj+gS-V|mmTAzC~38VyXSFF-6h)2@5VsP=*FSV<4cjL~j z!a_rL*qw@%hv$Qy2e!9fHue!E%NP-wba6Td+Om>)7Vp2ITiXQ$s!R(igpg zv}CW}1cohfabDha*1$h_CYGG7FUYz=w(f0QS*fhIiR*-I8$U-Z+=(@dz}7!dN>A`dIlod-tdoj(`beZE-Zvxe$_p0MzZ*xx={;F~u@@=HYn* zmt|MQNm;kO-QnO;QmbaTyQ$v$g8FF)WGSj{%e4*KePQC#@VO4(0+^AC$7$}ol&p{5 zC-l|c^tQFyO_jZUX5!|G)xb-N?8(^oM_r5G^vsm{_LZhU>qQAwC%n8yCv zCCWOSqGsC=wUvAYDnKcY;_+W>+ywHGrcJ0ZM!0KnhzGZQx?4e;=xBT?ErFqs=+4NK zmT345^|i7O=9^RO&xwV$7{gv^&EK+h*HYm@wB3~V>NyBdW^I>ls&KTW?OV#x&tcxP zpvuyZa!nbGuO0AVjS4Obs*K2TJjqMu%!9vkrt`mqGZQtI4P~gr1P=I!u&V;k*@x$3 
zbwNU~fnh-g26(moB=tbtLLnGriFB$XKeB zx&5|0tsl%cHm-=V-ph61z@Psl1%2pQE7>9@ha5A#IL`fh^20b{_wZZGwt6}5uVbV< zhq*Nphf^JAZQu8prt<_%lhpnqb;Bk4H1A|oPoaXG5U(j}hEHAYTHY7ZXEzLHTf=fs z2AU96h`o{2{kVB1@wpli9pJf~_5I-4lv@+}m1{gzJ?AuP>Z9J&ZcV*XB)|rE_w2oM zDJk|w);{g`NZRT0fx6H73bH65>-e7e{j_^WNeLBJVVB#*LcNdET zWVyd~Dmt&@2kou2_P;KxA7-snM9i~%a2{|%ZrBF8%$rF`SB=Un56iTOjPIwEt&EM2 zCWTY&8_TvjAF!EP-_^p+879hU;4|Xa1B-3YMhCV1Xc%O6^3tyHJMyqv^5^ z;~?wdHUo|h6K)>CHv5}YMf;J!t9bd{-WuS)A#js)WlIga`W_E8JaRENT|BVd^=awN z{Ql>onCisk^B3yf%=x>(6V6IMjIrvhVv*&_Gg|gES!v-9FJM4z!QP+tJmE*9^xhfSXs&6xQ>JsR_ea=C+n&;d{^KFXbZ2 zgwJ(DLkAI6KV`KbnVK=AprXN0edMs9X#;0N4fpUpBNQ}W_s)#uCR}dq9rR41ubkZ3 zG~=Xis5Q_){x&%1GkB%?&z3f)n{3UvwXx4KD~MNd?DCVs^Tw-sEq!-*(BGfE;WreC z=Cd6XN{Da~cDZhEFlT6%dZhw;CYr}gr)-Z-=T(t77K26_=R(0NQS{oMx!HYlLsz@~ zr`^rm^pV!|&dP!yhHXBS1=-kmk-fjg*q}c7^zZ9p*zfCNe?Ey48}+9jcp9Qc&z2*A z<$VxHfX$Ul?|$15o=}V_BHm8C)a09;zVH+9Kg-idw01q5SSP?*i80J z=U1OroX(pP(DQ9Z78@@?oRq?bovCiTDXq1uVDRF(ERIx^I7GfE1!Z?)`uw9Mv)557 zdL1ZI#9?eNOTD<|gYtXw>Tke}Ab`JFoeH#jGh6C_jmtm>N8o z5B+e@`qeVWxJd^jH1LS|#R4u`jO1vXpu{(3Xz=U-1ouNuubkbVI+6#$Aw$=?$r{gn zAJzw-zYzoF%1wCY_n&R3tj1byULuUak2ovzcoL(nl zEF@S=g+ixb!Keni*W-@X5YO=doq%!rjhrEqPxa1hWkIV3dDIOMU+|T?)FtSTE<>#d z(<)BAp1>RGiKGJu-Zo_ShUf$%RCcI?*c`eWGGQ4RCgCoza0+~-<#^*)< zr(}bjrq6aC{}oQ*<1Y=WU34j5SX4w53QjXB6qLRCwj%FqDoxR9K}?RRA;alOSx{5u z5nm&GA#aDb=7-KvgB+}=4n705=#X?^OXaZv90$$mRKjx~wQqA#6**os7BmQJugvnn zcdbseKS)r;u8e@KC__C{kbw-gECCR-**mI)$Jm%H$M1z(Rty85;VFXQ2TlzyflfaAl8^#j^_+o-MeJCeE;zlq-i z+#P*{gQp2HUK6v|5Ms*4IBqU$rZ$m(mTlTA3U!K7=&YxyDSvOlEvM-Zp$z1H5*x~C z2_Dx#@+|&<6uY>myXOcw>}HD(l0Q~&=3A|mIqMwJsT$o*u?!~Hi!-$k4KDVEK}HS8 zmpsk9jsqKtG;E83v`CTHumI%mH&y2JF_CYs6&FhKAz6aRxHY+rpf+2wGBi^di{WY( zUKo@dpp7gFMX0Tc`kzUFC--iOPsQMzH7iN1SfuYfsURbOz`px=WpWO#CFT=Q;Ud9# zg7y}?^J$@@p=F8qbgI}jU!N|#tY`AfBu?MJB;c@TxCn!WBe)th$!)LDGVz@CzYlDJ z;I=TBu&_w1@kglsgMW<-?p7#S1|e?IBp4D6{MdakoW} z#<5JAyrph-CQS*3%`SK6(OMs$nPpnJI$`oq~y%hBGblUoh+$atH zNW0H6lND}t>)Kfw;{nPFsyq&E-fh7ywJ!0kbd_Qx$|U*smK{FAlJNnXUT%@30xbVT z>Sm3J3`)JNdea2BV6u$sX)hJLdmAk_Eu9@nw&GEa07%P5mLv~DTNjhYcI}# zcQ=EkuBna<8_x;$FeJ;^l4NUR14ggY6Xk-}o*HEee&(_cco#Fqpo?lGU@|PhDroU^ zgQ##e2(D*o;sdtm*|&&#-~q079j{k>Mz7YNqS(+*5`5FFDQ^QVVvhCnbe_=u1AWck zRz0Pb^&c^pa1LTQ+11b$4UciSuobz?JNc!L9|VgR{~C}`y*-~QpuXSE6x&{Tf4)Jc zmhT5Zq`U3{sHJ|kAj1=rUFY;Yhf?90aZ{o-?cmnSkfp}&Do?I>&uv2}S@`ev(^Lmr z)ADQ!UrwQa`-n^3lu=Qy7Oo{eH=AfGa0lWTqACDtp0#aaD{T_i@U5lYq*mQn(GDUI zq-+4)Bj7a##8$0c5N9ng^!+IeeR3X4(kWCr@K%I zfybj8YzJyybj4#&%jJL0MvnW+P4s^o3}t0}P<3$*bOX)D4IfGg(EjR+CzE{~0R&PB ziCe2JyPYxrA5r8vSl3c#+za`Xneuf1n8rGFmG#ZO5&<7FZy3MbpiMg*1OIT(cSA>v zA5JV5VKQRiM1XUd#S7)LTQih7^9-tv6KAKTjY^9zF+arzVYUd8eKf@r33S_gLt?ou z$D{Ax4_Ubqgea+RnK`(O$@O;%`-K|s{X7YA9jQd2gsCSCwXTv2LBk5Vorklr1KmLY zf-$@2%LnX;0CS-^{$t1B&nY!)*6Dl5zF4=cR{y@#{wuH zSF|7f!&8nDCk$!Bq`Iuzx4tNq*5Dtbx)guDq&FE3>TpdWVRcd4e0 zDg}zQ^vbBDbvscVw?Apr%(#s@EHn^sc{`|DP`mqca@~qG180^b%@jZX=qG$b^Q)gM zysw;-ntJ&E0tw^c*&@dsDy;~dJDU@BwO5xrkKy>hNV4Ld@ShscZJ7h(nsYxZ@92N( zxT9`j-d4AKdqY6vYBqH#U3cOuwGLobD(rMT%|{ccB95YoI|V@`Hv`|)diJa`E$XT^ zMjO{&%ci&lKt$2CIwKA;tS#iEZk1}{R$qg&>|`v%$^J>Q?v20e%(<`A4hLd3J{UE< z!d%R$g%wnSrmm@n!tMwQ`&_^9{P2ul7T{K%*4-2vDzbKE!K_i`GF~)in-GK}!`X4U{s6{(Yl?}RLN+O(F|EO!CjUOL~gTM?M!7dTa71! 
zAw|dU2ZUMPJ6~SmxVW^(z};8lVR)FN@oU)3wBzE>8lrtE889Osm=BQ*>`VO9jJ720 zAA9WF>Ti}9UwiKef0rI$+n1~GevI->7z8u2lnoAlzML_j1xC?G>7a~2ti5iXpF^*& zARCY-9E|J+O|GoS^dDz&!B?#XHiT^&)KD$q8DqjLe`IYd9_Xpe26xnU8?qCV37m0C z8{(VAjrj`0V~lC~di9Ih)FWC|Lc$H1VNGkh*V~^-Y{(P`+GQb2`lZS`2L&>)gO;|5 z1;Q)k!|1c-Z^djXARH_CfU4Zf{J4&h6U~OXC*D#Sn)Jq}6=0aU-f?yuEz+2wlrQ$X z*d_0m#z{x~Wb(E*^3B(!t2vAd{~(w?|^-^n7p^UFJ)4Oy5) z5jTsv3uZx@DQ)S!y2wPh9y$SXrOK(_(Wj?xV&Vht#3^4qry#uA-7zBncsVrOX2pt> z6ITW$(d5yYncVMDE*kbG`wo%#CQgBOZo~}|W0X0PL85(o+-(G7g+4+oyJ&t|+BNn| z3cmRKOo(~wuKXMN+|MI!Wk2olj>Duu5W(4X1Y}n9dU!CXr&l@EdJBne>XakI!weX= z{>R`rcs_TdVe`f1CoWH=bs&^3@dQ}>OuoE}l{qz?FHVfB2{7+@+|!h)*u#|Abk$E! zyVw}^0q0^;&Q!s`m54h#9mJVEBs2fBD<4dnms>}m_##3kGC=jg4i{~!e8G-A zAAPhd9rT^~4^OREmTDIieCupV%^GVJd$^gh_QaGt@R3B^&mAfgp+JeG6gxDhLXONQ zYfJRxzLqlir?p;{2D^nd+b3DJvA&iz_OPXE@L0H({_b_7H0H@vhUVAepkARDUB?;c zwL_tpqK7*2BPVTfe)?RRD8>kZ9UPRUS@cwc^#yKfsXROH0t2%eP7VEnFC0Mr>|f9l z)w_$lxggt~GbKKyIL>9sFHt^~erx)TI9hpQcST@6`5i2AK~}~o_cUg|oH77ffLC7z z5#VU5G`Xp9t5n_=rwc=d1o7QUQnG`Z*KatFc{>ihuCGu-v1?j2bGxP*%ELN4e9>z` zvqNBG99j(6DX1Bv-beKAb3YXsB!y79CwmC}^oBlZ=v8!;CPB3;F?1J~`PgN$@3JTHU9xd~J5q|= z9(2r|KfNzs-M+Y$L+o)prk^6pQ{_LP@FTOcu5@`jDr_|V-OH9o=Cnq1v{BFqem5+) z;~>ZIPAM1+!b&?BVQ5h!X$!2yQ7J*HRc`G#`?<~&q<#p zJwEFu(&Nacv@0Hd!_SqP`br9%^NSDMUNy{Ayp%R^)!3zV zLgiwv@zcDJDo{=wEX=hbh(JXuzErs$Mq|oB)Qj`6wqOO`vYQjU zw1)tp3KM8cWn5c1G?B?&Tbek(=A?Eb0w62?tcH>xFl*e>9Gz|m6SdRO_#%zs(yoH_ zS1q*M9D)Ik=z`#CUW+&7WuB2@rRb}@kEt4 zv|`L_%Jw{xuNhj-cRaeOoF%SOQh3{KLjWEVjsA88^7*g)(5B~Jt%Ux3(sybTNJ)i? zwl)TOl@my0D3>;P@IVOw1)(U{f7pOF2LA8@9^JIJk+!U2nYa=b?a*Wy*SD5xES3btY1>tq2*!*BLz&YFG2UBH8&J{kC;1ZYUj#Lt$D15 zhkmHl%st1V)>o|FeNt_1%Q(O z)&WEk<@sO=N%GDk7GoNt6Ev6i2M>!o%Yzsl7SMzikl)HH zmH2P(rr2IP#7VVkW6wvPa5!}pGhXwVBT7*q_>hS=mmT(=ma1>x zyslAgtELUfNo;WG8ypa|wSz7zBHPmB_7^UE1L|VY;%@xww4uxrD(>}pXY~)6bykP# zRC36g`*F5s<7~9E*^0$y*?D{)1}zy!-I^KRFMGL_W0LzN-K_DH-yr9n zWdUA|SV1)1#kAOjP)eJG@8nQUnIDxzkB%p;1E2AlE$Rl#_h!trsFwk6NnU!1ONYk&W4@h@HG77OR zgr@_ijRCz)H(h`)jcmNtdkw$}^G4VL9I54S+SOxmfBd`t)s(K3Z9Tmc*ATFD?WkFY zCV?S;trA~FG?mP@1WG+Tm6(YL3|jgy|Cp(jDRA3#V@3QS0mq-66{5Md^i8`{$w{!# z`qC`Y-1k$F3cYC}$tckg5AVxTs{9uw{5)F3{c*OpoI_(lFOed>JTPUxNXbzF2QL^C;DZK{cQS<#tt9xu>ezW}nN zb|Yoo>ro3jlJ4gQevls*QEfikVO|+zimBL&A?iw6And0vZl8qNJfE*AXpy1b?LqkW z2IuV~FLsKe5ZU6Mtj*lEnDei-i63I#OTTudH!!=aG=09zF$E>UM%`k8tODF&qIPUf z?0-1{aObe_zMto-wjaNzpDuOlh?Uvp#l1u1)4!D+HpgS`A(^$ooQh>EBLABjkVg{I zDw%nEV8Lc(yD?iS^b_$dl`v}F^J2V7TqI7I47&ys^U-^7u&@DW{c5L$>u{j}EA(&? 
z2-d5FIJM0&lBfoap$RUI1x3ENGQU)^7?mBMj)~NwcPQ%v!42#F=fkzE^s*ySdiXAl z)_x1l<0U1fj&9mr4Mo|(K8$OoQhTm{)Ovf_v~Y^aavwUovI7ef`-jJj@8rL3G|zEV zuV0_4xLkAaC$gGNrInYz{+muNcxk{F5p8MK^HMEuxUdQ5I@%WAaBOEkUw$o`Q`PtE zTRS)^(iw!P7mAxRCCk~m++A%#2+`W-QF=Z``JCFfj+uY0NB-5oC zbcNkegc$a~EtibIZjq^$rANSyTwWz0B2I2kT*_BPbX(E8^=P8Vh>*@X35fI1sU3dm zc2eG6`QET^M7DTwG+{19LnEUrt^J%VqF@vOz-v2@e{iqllIX9oBd?#SD9=)6ost3v z$U{=L>1ERK#F>A142=U{pE&46dq8(WN1jgjy!{!u1E>Yq*^C;d{NA!KYK(vBKHnXu z%Rfl4of#yz6-L4QP}<|?Y+c=na8=O~N6iMiT6}B)@}}U!Xva`vr(uqgA5A7g@sr*@ zUXpTCyXo%V8V~**&O<3_tQD*;2k7G{j?5O}W&i-{+mEgvOro1upK!p#_bGPsEon8K zpsWy+)PZ7Di;WyXZxF{=CY#_vhMsbXz5F=A98zUy*O$2)WXnM50bi72eL-9lQodeB zqZ3aPu2RI5clXleeVwoty?ihYuub_0vLxqg4^x`_^rvr7!ykwQnvl3});*y0E(}t3 zn;s482{XR*c=XKtY4h3QGe_%+%Q=nTFcT_;=YmFTe-Di0Y&J!);T-L&N0G1~?^aBx z7}zXLn$gtVdod^BWciR1JcVnG@LwyD^N)EB*1V5MJ6ku2DZTF2l)V;IP&PrN<8>ta zl59&n(*l&kYNATIQeGMJA3j!lcL7+*t#!Qa zuUM(;-R}F$pG9B`ncwci6}5MpVj$!~f?TatVR3{xn(=G1dP^Yn-x2=PxmD{VIn|_nNW`i-r%t%Y9+CIcqZsNzI7y}}`i_#V#CfOpCGk!Ai?!0^Ld(lj< zsCR6#Y<%-sulM=DfG1`0PpK!V z6Q<5djni$a1dN(BAC9N(T8(>D0IC2e>*~4>Wy%@x!#!CteJOeI#Cw)jjrXkGd5*k{ z7#U8IsMR?hGk#Yms?2b8?;jqiDM3u>x5(cH=bp^#ORGOC29&(L7-gP06{P9QPaFPV zO6Go+mgN=XkT9{3y3f8rM>#CS&){j|OtXB972bQ6+-jKmJ4AY!i*FLywQ<)tFE!5? zyYO=L!WmbIS48KCBbO-!J!+aabLR_|Eqwv5K^u_M=yh%>_{Slf`$y;h_8 zXZHExr6=x^YYU~n0njE%u!3;0T)V!M0Z)w!Ji2sW9DfkEir%DxYZ`P1+#R1DW^>DV zc>mpyWtAzEn!d$(X9R;9bcn>>3idYaO_G(BEwtUa0=QW z(lreOn#sz@;+Rxoh1~Wgrz0IwA(u&g&s|m@`uuUlQlD!-MADr|(gX{@{@7t0|6@Ir z@YiG`bqB|PqC2o>Ep$8u6sw8h1gytam`-(9kclM zRB#ua-1<5ZgTbz<>Qp~F#)({$YwE}M6Y~qwQ?A-gJ+!kMTK*G>=4Vmc-9|DZ(rj#xcV zENDN;u3Ch>stZa}?=vDZmK}h`r%3g+R`X{uxF`ScD3Rq7JDRIxC7*jdz^XcUP46&G z$jB!{FkZwH)M=MGG$4|klR=O(y_MQCFz5>xu+uwXg1r*9(YSii+7LD9{D6wMVkCTO z-9>n}&N*U{k2QfyOw;|EY)dpQw~5w^6s(q28528rH`$nPd=j2&pKSQi0lXY1VmqcW z(TmGpRi8^EG*h_JWZAydj`!CAKQ>1b-voJR=H&az#YzLqfZW7b!Yv`7Em<<1cRZD~ zpgVXaT>he|XHz6Kk;NK7GV%J{-#TE6J>fi|8~;5i|MZSgEZV{-H<(k*fR0DHosCIq zo==zav-sgc_VbsPkLL_h^=atiMAsN;M9tjnO|JYUW<|3rz@}kL@7b~tY)AdcMD$@R zFu3Y!wr?4h)UL6N!K!nm{6|cWz>cs$+ARc0WW?QxP*mucQG8c0IAdIf3x>Pbg);|7)n4ef8w;m5hZ)7M zZ1el2=u^n8{yp3Yx|${^C%=1Xw(iaSdB$I``=i&D{f*;&ery#A^HrLrjP|qSv?I&) zDHyWY?_X~;xT?|;3I>Dmrq=)cpF#LPdxsAt0yJJl9OfF`g!P)!BCoX!D||e3V`CK` zKEas`aj0G_TwY8peDOh+jB@KMOs=r&l+i%KLxixUfc^oMV*D3jxKKvPs!hcCn0K<> zr++qdjV&ByPd)!>eeGu4j>a?J`ujUCrmf!ne%1K#kcGcu-gm)(e7^y)b};~|Rh|y0 zS2`v5s+rdNq|`2Lq(X6WZ8K7Pz(FEyrV@oBhV_{;~FZRi4XQuAd$r$(EF; zB+lpAdIyin(J@g(R@$JJ^4srXr7otWdbTqIPkEzLgo*NUkkC7H9R@4QCx}4^{W54c zXyE(c1gr)0?KvVhC>oT&yNE;6=YQGv6QEdZ(IUHkNG5OQ_cuP0O z>}Hc%SM6Qrd>s*0>84ept0wkTm_uhF6+c^hRhva9$==OVjlq-U?SGAflx{}V8We_|L3^dC z(c#cmFW6=fq`G^`^m*}|7B-&lX~4X;g{v^hW?Id^s(RUZL;6=!Go1(X!?0X*uPU4%U)U%CZ@Dm#? z`p6vaeC54dvsZ*50May{db2U6#!BbJZ^;y+Q-H-Z&^M}GZl%ydQ$`5TKN - + @@ -964,7 +964,7 @@

  • - + diff --git a/netbox_service/index.html b/netbox_service/index.html index fa32e21..46009ae 100755 --- a/netbox_service/index.html +++ b/netbox_service/index.html @@ -867,7 +867,7 @@
  • - + @@ -888,7 +888,7 @@
  • - + diff --git a/netbox_worker_api_reference/index.html b/netbox_worker_api_reference/index.html index e3f0b19..a87a011 100755 --- a/netbox_worker_api_reference/index.html +++ b/netbox_worker_api_reference/index.html @@ -867,7 +867,7 @@
  • - + @@ -888,7 +888,7 @@
  • - + @@ -1567,7 +1567,7 @@

    - Bases: NFPWorker

    + Bases:
    NFPWorker

    @@ -2254,7 +2254,7 @@

    - Result + Result
    @@ -2552,7 +2552,7 @@

    - Result + Result
    @@ -2938,7 +2938,7 @@

    - Result + Result
    diff --git a/nfcli_client_api_reference/index.html b/nfcli_client_api_reference/index.html index d75579c..fb26ebd 100755 --- a/nfcli_client_api_reference/index.html +++ b/nfcli_client_api_reference/index.html @@ -865,7 +865,7 @@
  • - + @@ -886,7 +886,7 @@
  • - + diff --git a/norfab_architecture/index.html b/norfab_architecture/index.html index 25fed43..9bb902d 100755 --- a/norfab_architecture/index.html +++ b/norfab_architecture/index.html @@ -927,7 +927,7 @@
  • - + @@ -948,7 +948,7 @@
  • - + diff --git a/norfab_broker_reference/index.html b/norfab_broker_reference/index.html index 74a4f9b..e31a76a 100755 --- a/norfab_broker_reference/index.html +++ b/norfab_broker_reference/index.html @@ -18,7 +18,7 @@ - + @@ -1079,7 +1079,7 @@
  • - + @@ -1100,7 +1100,7 @@
  • - + diff --git a/norfab_cli_overview/index.html b/norfab_cli_overview/index.html index 64e9214..c36b7ba 100755 --- a/norfab_cli_overview/index.html +++ b/norfab_cli_overview/index.html @@ -867,7 +867,7 @@
  • - + @@ -888,7 +888,7 @@
  • - + diff --git a/norfab_client_reference/index.html b/norfab_client_reference/index.html new file mode 100755 index 0000000..35794ac --- /dev/null +++ b/norfab_client_reference/index.html @@ -0,0 +1,3590 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Client - NORFAB Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    + +
    + + + + + + +
    + + + + + + + +
    + +
    + + + + +
    +
    + + + +
    +
    +
    + + + + + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    +
    + + + +
    +
    + + + + + + + +

    Client

    + +
    + + + + +
    + +

KUDOS¤

    +

    Inspired by Majordomo Protocol Client API, ZeroMQ, Python version.

    +

    Original MDP/Client spec

    +

    Location: http://rfc.zeromq.org/spec:7.

    +

    Author: Min RK benjaminrk@gmail.com

    +

    Based on Java example by Arkadiusz Orzechowski

    + + + +
    + + + + + + + + +
    + + + +

    + NFPClient(broker, name, log_level='WARNING', exit_event=None, event_queue=None) + +¤

    + + +
    +

    + Bases: object

    + + +

    NORFAB Protocol Client API.

    + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
    broker + +
    +

    str, broker endpoint e.g. tcp://127.0.0.1:5555

    +
    +
    + required +
    name + +
    +

    str, client name, default is NFPClient

    +
    +
    + required +
    + +
    + Source code in norfab\core\client.py +
     82
    + 83
    + 84
    + 85
    + 86
    + 87
    + 88
    + 89
    + 90
    + 91
    + 92
    + 93
    + 94
    + 95
    + 96
    + 97
    + 98
    + 99
    +100
    +101
    +102
    +103
    +104
    +105
    +106
    +107
    +108
    +109
    +110
    +111
    +112
    +113
    +114
    +115
    +116
    def __init__(
    +    self, broker, name, log_level="WARNING", exit_event=None, event_queue=None
    +):
    +    log.setLevel(log_level.upper())
    +    self.name = name
    +    self.zmq_name = f"{self.name}-{uuid4().hex}"
    +    self.broker = broker
    +    self.ctx = zmq.Context()
    +    self.poller = zmq.Poller()
    +    self.reconnect_to_broker()
    +    self.base_dir = f"__norfab__/files/client/{self.name}/"
    +    self.base_dir_jobs = os.path.join(self.base_dir, "jobs")
    +    self.events_dir = os.path.join(self.base_dir, "events")
    +
    +    # create all the folders
    +    os.makedirs(self.base_dir, exist_ok=True)
    +    os.makedirs(self.base_dir_jobs, exist_ok=True)
    +    os.makedirs(self.events_dir, exist_ok=True)
    +
    +    # create queue file
    +    self.queue_filename = os.path.join(
    +        self.base_dir_jobs, f"{self.name}.jobsqueue.txt"
    +    )
    +    if not os.path.exists(self.queue_filename):
    +        with open(self.queue_filename, "w") as f:
    +            pass
    +
    +    self.exit_event = exit_event or threading.Event()
    +    self.recv_queue = queue.Queue(maxsize=0)
    +    self.event_queue = event_queue or queue.Queue(maxsize=1000)
    +
    +    # start receive thread
    +    self.recv_thread = threading.Thread(
    +        target=recv, daemon=True, name=f"{self.name}_recv_thread", args=(self,)
    +    ).start()
    +
    +
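For orientation, here is a minimal usage sketch for creating a client. It assumes the module path norfab.core.client matches the source file referenced above and that a NorFab broker is already listening on the endpoint shown; the endpoint and client name are placeholder values, not defaults.

# minimal sketch, assuming a broker is reachable at the placeholder endpoint below
from norfab.core.client import NFPClient

client = NFPClient(
    broker="tcp://127.0.0.1:5555",   # placeholder broker endpoint
    name="docs-example-client",      # placeholder client name
    log_level="WARNING",
)

The constructor connects to the broker and starts the background receive thread shown in the source below.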
    + + + +
    + + + + + + + + + +
    + + +

    + reconnect_to_broker() + +¤

    + + +
    + +

    Connect or reconnect to broker

    + +
    + Source code in norfab\core\client.py +
    129
    +130
    +131
    +132
    +133
    +134
    +135
    +136
    +137
    +138
    +139
    +140
    def reconnect_to_broker(self):
    +    """Connect or reconnect to broker"""
    +    if self.broker_socket:
    +        self.poller.unregister(self.broker_socket)
    +        self.broker_socket.close()
    +    self.broker_socket = self.ctx.socket(zmq.DEALER)
    +    self.broker_socket.setsockopt_unicode(zmq.IDENTITY, self.zmq_name, "utf8")
    +    self.broker_socket.linger = 0
    +    self.broker_socket.connect(self.broker)
    +    self.poller.register(self.broker_socket, zmq.POLLIN)
    +    log.debug(f"{self.name} - client connected to broker at '{self.broker}'")
    +    self.stats_reconnect_to_broker += 1
    +
    +
    +
    + +
    + +
    + + +

    + send_to_broker(command, service, workers, uuid, request) + +¤

    + + +
    + +

    Send message to broker.

    + +
    + Source code in norfab\core\client.py +
    142
    +143
    +144
    +145
    +146
    +147
    +148
    +149
    +150
    +151
    +152
    +153
    +154
    +155
    +156
    +157
    def send_to_broker(self, command, service, workers, uuid, request):
    +    """Send message to broker."""
    +    if command == NFP.POST:
    +        msg = [b"", NFP.CLIENT, command, service, workers, uuid, request]
    +    elif command == NFP.GET:
    +        msg = [b"", NFP.CLIENT, command, service, workers, uuid, request]
    +    else:
    +        log.error(
    +            f"{self.name} - cannot send '{command}' to broker, command unsupported"
    +        )
    +        return
    +
    +    log.debug(f"{self.name} - sending '{msg}'")
    +
    +    self.broker_socket.send_multipart(msg)
    +    self.stats_send_to_broker += 1
    +
    +
    +
    + +
    + +
    + + +

    + rcv_from_broker(command, service, uuid) + +¤

    + + +
    + +

    Wait for response from broker.

    + +
    + Source code in norfab\core\client.py +
    159
    +160
    +161
    +162
    +163
    +164
    +165
    +166
    +167
    +168
    +169
    +170
    +171
    +172
    +173
    +174
    +175
    +176
    +177
    +178
    +179
    +180
    +181
    +182
    +183
    +184
    +185
    +186
    +187
    +188
    +189
    +190
    +191
    +192
    +193
    +194
    +195
    +196
    +197
    +198
    +199
    +200
    +201
    +202
    +203
    +204
    +205
    +206
    +207
    +208
    +209
    def rcv_from_broker(self, command, service, uuid):
    +    """Wait for response from broker."""
    +    retries = 3
    +    while retries > 0:
    +        # check if need to stop
    +        if self.exit_event.is_set():
    +            break
    +        try:
    +            msg = self.recv_queue.get(block=True, timeout=3)
    +            self.recv_queue.task_done()
    +        except queue.Empty:
    +            if retries:
    +                log.warning(
    +                    f"{self.name} - '{uuid}:{service}:{command}' job, "
    +                    f"no reply from broker '{self.broker}', reconnecting"
    +                )
    +                self.reconnect_to_broker()
    +            retries -= 1
    +            continue
    +
    +        (
    +            empty,
    +            reply_header,
    +            reply_command,
    +            reply_service,
    +            reply_uuid,
    +            reply_status,
    +            reply_task_result,
    +        ) = msg
    +
    +        # find message from recv queue for given uuid
    +        if reply_uuid == uuid:
    +            assert (
    +                reply_header == NFP.CLIENT
    +            ), f"Was expecting client header '{NFP.CLIENT}' received '{reply_header}'"
    +            assert (
    +                reply_command == command
    +            ), f"Was expecting reply command '{command}' received '{reply_command}'"
    +            assert (
    +                reply_service == service
    +            ), f"Was expecting reply from '{service}' but received reply from '{reply_service}' service"
    +
    +            return reply_status, reply_task_result
    +        else:
    +            self.recv_queue.put(msg)
    +    else:
    +        log.error(
    +            f"{self.name} - '{uuid}:{service}:{command}' job, "
    +            f"client {retries} retries attempts exceeded"
    +        )
    +        return b"408", b'{"status": "Request Timeout"}'
    +
    +
    +
    + +
    + +
    + + +

    + post(service, task, args=None, kwargs=None, workers='all', uuid=None, timeout=600) + +¤

    + + +
    + +

    Send job request to broker.

    +

Returns a dictionary with status, workers and errors keys, containing the list of workers that acknowledged the POST request.
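As an illustration, a hedged sketch of dispatching a job with post() follows; the service and task names are placeholders rather than real NorFab services, and client is assumed to be the connected NFPClient instance from the constructor example.

# minimal sketch, assuming `client` is a connected NFPClient instance;
# service/task names and arguments are placeholders
from uuid import uuid4

job_uuid = uuid4().hex                  # job identifier reused by the get() example below
post_result = client.post(
    service="example-service",
    task="example_task",
    kwargs={"foo": "bar"},
    workers="all",
    uuid=job_uuid,
    timeout=60,
)
# post_result carries "status", "workers" and "errors" keys
print(post_result["status"], post_result["workers"])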

    + +
    + Source code in norfab\core\client.py +
    211
    +212
    +213
    +214
    +215
    +216
    +217
    +218
    +219
    +220
    +221
    +222
    +223
    +224
    +225
    +226
    +227
    +228
    +229
    +230
    +231
    +232
    +233
    +234
    +235
    +236
    +237
    +238
    +239
    +240
    +241
    +242
    +243
    +244
    +245
    +246
    +247
    +248
    +249
    +250
    +251
    +252
    +253
    +254
    +255
    +256
    +257
    +258
    +259
    +260
    +261
    +262
    +263
    +264
    +265
    +266
    +267
    +268
    +269
    +270
    +271
    +272
    +273
    +274
    +275
    +276
    +277
    +278
    +279
    +280
    +281
    +282
    +283
    +284
    +285
    +286
    +287
    +288
    +289
    +290
    +291
    +292
    +293
    +294
    +295
    +296
    +297
    +298
    +299
    +300
    +301
    +302
    +303
    +304
    +305
    +306
    +307
    +308
    +309
    +310
    +311
    +312
    +313
    +314
    +315
    def post(
    +    self,
    +    service: str,
    +    task: str,
    +    args: list = None,
    +    kwargs: dict = None,
    +    workers: str = "all",
    +    uuid: hex = None,
    +    timeout: int = 600,
    +):
    +    """
    +    Send job request to broker.
    +
    +    Return dictionary with ``status``, ``workers``, ``errors`` keys
    +    containing list of workers acknowledged POST request.
    +    """
    +    uuid = uuid or uuid4().hex
    +    args = args or []
    +    kwargs = kwargs or {}
    +    ret = {"status": b"200", "workers": [], "errors": []}
    +
    +    if not isinstance(service, bytes):
    +        service = service.encode("utf-8")
    +
    +    if not isinstance(uuid, bytes):
    +        uuid = uuid.encode("utf-8")
    +
    +    workers = self._make_workers(workers)
    +
    +    request = json.dumps(
    +        {"task": task, "kwargs": kwargs or {}, "args": args or []}
    +    ).encode("utf-8")
    +
    +    # run POST response loop
    +    start_time = time.time()
    +    while timeout > time.time() - start_time:
    +        # check if need to stop
    +        if self.exit_event.is_set():
    +            return ret
    +        self.send_to_broker(
    +            NFP.POST, service, workers, uuid, request
    +        )  # 1 send POST to broker
    +        status, post_response = self.rcv_from_broker(
    +            NFP.RESPONSE, service, uuid
    +        )  # 2 receive RESPONSE from broker
    +        if status == b"202":  # 3 go over RESPONSE status and decide what to do
    +            break
    +        else:
    +            msg = f"{self.name} - '{uuid}' job, POST Request not accepted by broker '{post_response}'"
    +            log.error(msg)
    +            ret["errors"].append(msg)
    +            ret["status"] = status
    +            return ret
    +    else:
    +        msg = f"{self.name} - '{uuid}' job, broker POST Request Timeout"
    +        log.error(msg)
    +        ret["errors"].append(msg)
    +        ret["status"] = b"408"
    +        return ret
    +
    +    # get a list of workers where job was dispatched to
    +    post_response = json.loads(post_response)
    +    workers_dispatched = set(post_response["workers"])
    +    log.debug(
    +        f"{self.name} - broker dispatched job '{uuid}' POST request to workers {workers_dispatched}"
    +    )
    +
    +    # wait workers to ACK POSTed job
    +    start_time = time.time()
    +    workers_acked = set()
    +    while timeout > time.time() - start_time:
    +        # check if need to stop
    +        if self.exit_event.is_set():
    +            return ret
    +        status, response = self.rcv_from_broker(NFP.RESPONSE, service, uuid)
    +        response = json.loads(response)
    +        if status == b"202":  # ACCEPTED
    +            log.debug(
    +                f"{self.name} - '{uuid}' job, acknowledged by worker '{response}'"
    +            )
    +            workers_acked.add(response["worker"])
    +            if workers_acked == workers_dispatched:
    +                break
    +        else:
    +            msg = (
    +                f"{self.name} - '{uuid}:{service}:{task}' job, "
    +                f"unexpected POST request status '{status}', response '{response}'"
    +            )
    +            log.error(msg)
    +            ret["errors"].append(msg)
    +    else:
    +        msg = (
    +            f"{self.name} - '{uuid}' job, POST request timeout exceeded, these workers did not "
    +            f"acknowledge the job {workers_dispatched - workers_acked}"
    +        )
    +        log.error(msg)
    +        ret["errors"].append(msg)
    +        ret["status"] = b"408"
    +
    +    ret["workers"] = list(workers_acked)
    +    ret["status"] = ret["status"].decode("utf-8")
    +
    +    log.debug(f"{self.name} - '{uuid}' job POST request completed '{ret}'")
    +
    +    return ret
    +
    +
    +
    + +
    + +
    + + +

    + get(service, task, args=None, kwargs=None, workers='all', uuid=None, timeout=600) + +¤

    + + +
    + +

Send job reply message to broker requesting job results.
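A short, hedged sketch of collecting results with get() is shown below; it assumes the job was first dispatched with post() under the same uuid, and all service, task and variable names are placeholders carried over from the post() example above.

# minimal sketch, assuming `client`, `post_result` and `job_uuid` come from
# the earlier post() call for the same placeholder service and task
reply = client.get(
    service="example-service",
    task="example_task",
    workers=post_result["workers"],  # only the workers that acknowledged the POST
    uuid=job_uuid,
    timeout=60,
)
# for worker-dispatched jobs, reply["results"] is keyed by worker name
# and reply["errors"] is a list of error strings
for worker_name, worker_result in reply["results"].items():
    print(worker_name, worker_result)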

    + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
    service + str + +
    +

    mandatory, service name to target

    +
    +
    + required +
    task + str + +
    +

    mandatory, service task name to run

    +
    +
    + required +
    args + list + +
    +

optional, list of positional arguments for the task

    +
    +
    + None +
    kwargs + dict + +
    +

    optional, dictionary of key-word arguments for the task

    +
    +
    + None +
    workers + str + +
    +

optional, workers to target - all, any, or a list of worker names

    +
    +
    + 'all' +
    uuid + hex + +
    +

    optional, unique job identifier

    +
    +
    + None +
    timeout + int + +
    +

optional, job timeout in seconds, for how long the client waits for job results before giving up. Returns a dictionary of status, results and errors keys, where results is a dictionary keyed by worker names and errors is a list of error strings.

    +
    +
    + 600 +
    + +
    + Source code in norfab\core\client.py +
    317
    +318
    +319
    +320
    +321
    +322
    +323
    +324
    +325
    +326
    +327
    +328
    +329
    +330
    +331
    +332
    +333
    +334
    +335
    +336
    +337
    +338
    +339
    +340
    +341
    +342
    +343
    +344
    +345
    +346
    +347
    +348
    +349
    +350
    +351
    +352
    +353
    +354
    +355
    +356
    +357
    +358
    +359
    +360
    +361
    +362
    +363
    +364
    +365
    +366
    +367
    +368
    +369
    +370
    +371
    +372
    +373
    +374
    +375
    +376
    +377
    +378
    +379
    +380
    +381
    +382
    +383
    +384
    +385
    +386
    +387
    +388
    +389
    +390
    +391
    +392
    +393
    +394
    +395
    +396
    +397
    +398
    +399
    +400
    +401
    +402
    +403
    +404
    +405
    +406
    +407
    +408
    +409
    +410
    +411
    +412
    +413
    +414
    +415
    +416
    +417
    +418
    +419
    +420
    +421
    +422
    +423
    +424
    +425
    +426
    +427
    +428
    +429
    +430
    +431
    +432
    +433
    +434
    +435
    +436
    +437
    +438
    +439
    def get(
    +    self,
    +    service: str,
    +    task: str,
    +    args: list = None,
    +    kwargs: dict = None,
    +    workers: str = "all",
    +    uuid: hex = None,
    +    timeout: int = 600,
+):
+    """
+    Send job reply message to broker requesting job results.
    +
    +    :param service: mandatory, service name to target
    +    :param task: mandatory, service task name to run
    +    :param args: optional, list of position argument for the task
    +    :param kwargs: optional, dictionary of key-word arguments for the task
    +    :param workers: optional, workers to target - ``all``, ``any``, or
    +        list of workers names
    +    :param uuid: optional, unique job identifier
    +    :param timeout: optional, job timeout in seconds, for how long client
    +        waits for job result before giving up
    +
    +    Returns dictionary of ``status``, ``results`` and ``errors`` keys,
    +    where ``results`` key is a dictionary keyed by workers' names, and
    +    ``errors`` is a list of error strings.
    +    """
    +    uuid = uuid or uuid4().hex
    +    args = args or []
    +    kwargs = kwargs or {}
    +    wkrs = {
    +        "requested": workers,
    +        "done": set(),
    +        "dispatched": set(),
    +        "pending": set(),
    +    }
    +    ret = {"status": b"200", "results": {}, "errors": [], "workers": wkrs}
    +
    +    if not isinstance(service, bytes):
    +        service = service.encode("utf-8")
    +
    +    if not isinstance(uuid, bytes):
    +        uuid = uuid.encode("utf-8")
    +
    +    workers = self._make_workers(workers)
    +
    +    request = json.dumps(
    +        {"task": task, "kwargs": kwargs or {}, "args": args or []}
    +    ).encode("utf-8")
    +
    +    # run GET response loop
    +    start_time = time.time()
    +    while timeout > time.time() - start_time:
    +        # check if need to stop
    +        if self.exit_event.is_set():
    +            return None
    +        # dispatch GET request to workers
    +        self.send_to_broker(NFP.GET, service, workers, uuid, request)
    +        status, get_response = self.rcv_from_broker(NFP.RESPONSE, service, uuid)
    +        ret["status"] = status
    +        # received actual GET request results from broker e.g. MMI, SID or FSS services
    +        if status == b"200":
    +            ret["results"] = get_response.decode("utf-8")
    +            break
    +        # received DISPATCH response from broker
    +        if status != b"202":
    +            msg = f"{status}, {self.name} job '{uuid}' GET Request not accepted by broker '{get_response}'"
    +            log.error(msg)
    +            ret["errors"].append(msg)
    +            break
    +        get_response = json.loads(get_response)
    +        wkrs["dispatched"] = set(get_response["workers"])
    +        # collect GET responses from individual workers
    +        workers_responded = set()
    +        while timeout > time.time() - start_time:
    +            # check if need to stop
    +            if self.exit_event.is_set():
    +                return None
    +            status, response = self.rcv_from_broker(NFP.RESPONSE, service, uuid)
    +            log.debug(
    +                f"{self.name} - job '{uuid}' response from worker '{response}'"
    +            )
    +            response = json.loads(response)
    +            if status == b"200":  # OK
    +                ret["results"].update(response)
    +                log.debug(
    +                    f"{self.name} - job '{uuid}' results returned by worker '{response}'"
    +                )
    +                for w in response.keys():
    +                    wkrs["done"].add(w)
    +                    workers_responded.add(w)
    +                    if w in wkrs["pending"]:
    +                        wkrs["pending"].remove(w)
    +                if wkrs["done"] == wkrs["dispatched"]:
    +                    break
    +            elif status == b"300":  # PENDING
    +                # set status to pending if at least one worker is pending
    +                ret["status"] = b"300"
    +                wkrs["pending"].add(response["worker"])
    +                workers_responded.add(response["worker"])
    +            else:
    +                if response.get("worker"):
    +                    workers_responded.add(response["worker"])
    +                msg = (
    +                    f"{self.name} - '{uuid}:{service}:{task}' job, "
    +                    f"unexpected GET Response status '{status}', response '{response}'"
    +                )
    +                log.error(msg)
    +                ret["errors"].append(msg)
    +            if workers_responded == wkrs["dispatched"]:
    +                break
    +        if wkrs["done"] == wkrs["dispatched"]:
    +            break
    +        time.sleep(0.2)
    +    else:
    +        msg = f"{self.name} - '{uuid}' job, broker {timeout}s GET request timeout expired"
    +        log.error(msg)
    +        ret["errors"].append(msg)
    +        ret["status"] = b"408"
    +
    +    ret["status"] = ret["status"].decode("utf-8")
    +
    +    return ret
    +
    +
    +
    + +
    + +
    + + +

    + get_iter(service, task, args=None, kwargs=None, workers='all', uuid=None, timeout=600) + +¤

    + + +
    + +

    Send job reply message to broker requesting job results.

    + +
    + Source code in norfab\core\client.py +
    441
    +442
    +443
    +444
    +445
    +446
    +447
    +448
    +449
    +450
    +451
    +452
    +453
    +454
    +455
    +456
    +457
    +458
    +459
    +460
    +461
    +462
    +463
    +464
    +465
    +466
    +467
    +468
    +469
    +470
    +471
    +472
    +473
    +474
    +475
    +476
    +477
    +478
    +479
    +480
    +481
    +482
    +483
    +484
    +485
    +486
    +487
    +488
    +489
    +490
    +491
    +492
    +493
    +494
    +495
    +496
    +497
    +498
    +499
    +500
    +501
    +502
    +503
    +504
    +505
    +506
    +507
    +508
    +509
    +510
    +511
    +512
    +513
    +514
    +515
    +516
    +517
    +518
    +519
    def get_iter(
    +    self,
    +    service: str,
    +    task: str,
    +    args: list = None,
    +    kwargs: dict = None,
    +    workers: str = "all",
    +    uuid: hex = None,
    +    timeout: int = 600,
    +):
    +    """Send job reply message to broker requesting job results."""
    +    uuid = uuid or uuid4().hex
    +    args = args or []
    +    kwargs = kwargs or {}
    +
    +    if not isinstance(service, bytes):
    +        service = service.encode("utf-8")
    +
    +    if not isinstance(uuid, bytes):
    +        uuid = uuid.encode("utf-8")
    +
    +    workers = self._make_workers(workers)
    +
    +    request = json.dumps(
    +        {"task": task, "kwargs": kwargs or {}, "args": args or []}
    +    ).encode("utf-8")
    +
    +    # run GET response loop
    +    start_time = time.time()
    +    workers_done = set()
    +    while timeout > time.time() - start_time:
    +        # check if need to stop
    +        if self.exit_event.is_set():
    +            break
    +        # dispatch GET request to workers
    +        self.send_to_broker(NFP.GET, service, workers, uuid, request)
    +        status, get_response = self.rcv_from_broker(NFP.RESPONSE, service, uuid)
    +        # received DISPATCH response from broker
    +        if status != b"202":
    +            msg = f"{status}, {self.name} job '{uuid}' GET Request not accepted by broker '{get_response}'"
    +            log.error(msg)
    +            break
    +        get_response = json.loads(get_response)
    +        workers_dispatched = set(get_response["workers"])
    +        # collect GET responses from workers
    +        workers_responded = set()
    +        while timeout > time.time() - start_time:
    +            # check if need to stop
    +            if self.exit_event.is_set():
    +                break
    +            status, response = self.rcv_from_broker(NFP.RESPONSE, service, uuid)
    +            log.debug(
    +                f"{self.name} - job '{uuid}' response from worker '{response}'"
    +            )
    +            response = json.loads(response)
    +            if status == b"200":  # OK
    +                log.debug(
    +                    f"{self.name} - job '{uuid}' results returned by worker '{response}'"
    +                )
    +                yield response
    +                for w in response.keys():
    +                    workers_done.add(w)
    +                    workers_responded.add(w)
    +                if workers_done == workers_dispatched:
    +                    break
    +            elif status == b"300":  # PENDING
    +                workers_responded.add(response["worker"])
    +            else:
    +                msg = f"{self.name} - unexpected GET Response status '{status}', response '{response}'"
    +                log.error(msg)
+                # note: unlike get(), get_iter has no 'ret' dict to collect errors, so they are only logged
    +            if workers_responded == workers_dispatched:
    +                break
    +        if workers_done == workers_dispatched:
    +            break
    +        time.sleep(0.2)
    +    else:
    +        msg = f"408, {self.name} job '{uuid}' broker GET Request Timeout"
    +        log.error(msg)
    +
    +
    +
    + +
    + +
    + + +

    + fetch_file(url, destination=None, chunk_size=250000, pipiline=10, timeout=600, read=False) + +¤

    + + +
    + +

    Function to download file from Broker File Sharing Service.
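The sketch below shows one way this could be called; the nf:// URL and the destination path are placeholders and assume the file exists on the broker File Sharing Service, with client being a connected NFPClient instance.

# minimal sketch, assuming `client` is a connected NFPClient and the
# placeholder nf:// URL exists on the broker File Sharing Service
status, reply = client.fetch_file(
    url="nf://some/file.txt",            # placeholder path
    destination="./downloads/file.txt",  # where to save the fetched file
)
if status == "200":
    print(f"file saved to {reply}")      # reply is the destination path
else:
    print(f"download failed: {status} {reply}")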

    + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
    url + str + +
    +

    (str), path to file relative to base_dir

    +
    +
    + required +
    destination + str + +
    +

(str), optional destination path to save the file; if not provided, the file is saved under the client's base_dir and the saved path is returned (or the file content when read=True)

    +
    +
    + None +
    + +
    + Source code in norfab\core\client.py +
    521
    +522
    +523
    +524
    +525
    +526
    +527
    +528
    +529
    +530
    +531
    +532
    +533
    +534
    +535
    +536
    +537
    +538
    +539
    +540
    +541
    +542
    +543
    +544
    +545
    +546
    +547
    +548
    +549
    +550
    +551
    +552
    +553
    +554
    +555
    +556
    +557
    +558
    +559
    +560
    +561
    +562
    +563
    +564
    +565
    +566
    +567
    +568
    +569
    +570
    +571
    +572
    +573
    +574
    +575
    +576
    +577
    +578
    +579
    +580
    +581
    +582
    +583
    +584
    +585
    +586
    +587
    +588
    +589
    +590
    +591
    +592
    +593
    +594
    +595
    +596
    +597
    +598
    +599
    +600
    +601
    +602
    +603
    +604
    +605
    +606
    +607
    +608
    +609
    +610
    +611
    +612
    +613
    +614
    +615
    +616
    +617
    +618
    +619
    +620
    +621
    +622
    +623
    +624
    +625
    +626
    +627
    +628
    +629
    +630
    +631
    +632
    +633
    +634
    +635
    +636
    +637
    +638
    +639
    def fetch_file(
    +    self,
    +    url: str,
    +    destination: str = None,
    +    chunk_size: int = 250000,
    +    pipiline: int = 10,
    +    timeout: int = 600,
    +    read: bool = False,
    +):
    +    """
    +    Function to download file from Broker File Sharing Service.
    +
    +    :param url: (str), path to file relative to ``base_dir``
    +    :param destination: (str), if provided destination to save file,
    +        returns file content otherwise
    +    """
    +    uuid = str(uuid4().hex).encode("utf-8")
    +    total = 0  # Total bytes received
    +    chunks = 0  # Total chunks received
    +    offset = 0  # Offset of next chunk request
    +    credit = pipiline  # Up to PIPELINE chunks in transit
    +    service = b"fss.service.broker"
    +    workers = b"any"
    +    reply = ""
    +    status = "200"
    +    downloaded = False
    +    md5hash = None
    +
    +    # define file destination
    +    if destination is None:
    +        destination = os.path.join(
    +            self.base_dir, "fetchedfiles", *os.path.split(url.replace("nf://", ""))
    +        )
    +
    +    # make sure all destination directories exist
    +    os.makedirs(os.path.split(destination)[0], exist_ok=True)
    +
    +    # get file details
    +    request = json.dumps({"task": "file_details", "kwargs": {"url": url}}).encode(
    +        "utf-8"
    +    )
    +    self.send_to_broker(NFP.GET, service, workers, uuid, request)
    +    rcv_status, file_details = self.rcv_from_broker(NFP.RESPONSE, service, uuid)
    +    file_details = json.loads(file_details)
    +
    +    # check if file already downloaded
    +    if os.path.isfile(destination):
    +        file_hash = hashlib.md5()
    +        with open(destination, "rb") as f:
    +            chunk = f.read(8192)
    +            while chunk:
    +                file_hash.update(chunk)
    +                chunk = f.read(8192)
    +        md5hash = file_hash.hexdigest()
    +        downloaded = md5hash == file_details["md5hash"]
    +        log.debug(f"{self.name} - file already downloaded, nothing to do")
    +
    +    # fetch file content from broker and save to local file
    +    if file_details["exists"] is True and downloaded is False:
    +        file_hash = hashlib.md5()
    +        with open(destination, "wb") as dst_file:
    +            start_time = time.time()
    +            while timeout > time.time() - start_time:
    +                # check if need to stop
    +                if self.exit_event.is_set():
    +                    return "400", ""
    +                # ask for chunks
    +                while credit:
    +                    request = json.dumps(
    +                        {
    +                            "task": "fetch_file",
    +                            "kwargs": {
    +                                "offset": offset,
    +                                "chunk_size": chunk_size,
    +                                "url": url,
    +                            },
    +                        }
    +                    ).encode("utf-8")
    +                    self.send_to_broker(NFP.GET, service, workers, uuid, request)
    +                    offset += chunk_size
    +                    credit -= 1
    +                # receive chunks from broker
    +                status, chunk = self.rcv_from_broker(NFP.RESPONSE, service, uuid)
    +                log.debug(
    +                    f"{self.name} - status '{status}', chunk '{chunks}', downloaded '{total}'"
    +                )
    +                dst_file.write(chunk)
    +                file_hash.update(chunk)
    +                chunks += 1
    +                credit += 1
    +                size = len(chunk)
    +                total += size
    +                if size < chunk_size:
    +                    break  # Last chunk received; exit
    +            else:
    +                reply = "File download failed - timeout"
    +                status = "408"
    +        # verify md5hash
    +        md5hash = file_hash.hexdigest()
    +    elif file_details["exists"] is False:
    +        reply = "File download failed - file not found"
    +        status = "404"
    +
    +    # decide on what to reply and status
    +    if file_details["exists"] is not True:
    +        reply = reply
    +    elif md5hash != file_details["md5hash"]:
    +        reply = "File download failed - MD5 hash mismatch"
    +        status = "417"
    +    elif read:
    +        with open(destination, "r", encoding="utf-8") as f:
    +            reply = f.read()
    +    else:
    +        reply = destination
    +    # decode status
    +    if isinstance(status, bytes):
    +        status = status.decode("utf-8")
    +
    +    return status, reply
    +
    +
    +
    + +
    + +
    + + +

    + run_job(service, task, uuid=None, args=None, kwargs=None, workers='all', timeout=600, retry=10) + +¤

    + + +
    + +

    Run job and return results produced by workers.
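Below is a hedged sketch of the typical high-level workflow with run_job(), which wraps the post() and get() calls documented above; the service, task and arguments are placeholders.

# minimal sketch, assuming `client` is a connected NFPClient instance;
# service/task names and arguments are placeholders
results = client.run_job(
    service="example-service",
    task="example_task",
    kwargs={"foo": "bar"},
    workers="all",
    timeout=120,
)
if results is None:
    print("job was not accepted or returned unexpected results")
else:
    # for worker-run jobs the results are keyed by worker name
    for worker_name, worker_result in results.items():
        print(worker_name, worker_result)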

    + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
    service + str + +
    +

    str, service name to send request to

    +
    +
    + required +
    task + str + +
    +

    str, task name to run for given service

    +
    +
    + required +
    uuid + str + +
    +

    (str) Job ID to use

    +
    +
    + None +
    args + list + +
    +

    list, task arguments

    +
    +
    + None +
    kwargs + dict + +
    +

    dict, task key-word arguments

    +
    +
    + None +
    workers + str + +
    +

    str or list, worker names to target

    +
    +
    + 'all' +
    timeout + int + +
    +

    overall job timeout in seconds

    +
    +
    + 600 +
    retry + +
    +

    number of times to try and GET job results

    +
    +
    + 10 +
    + +
    + Source code in norfab\core\client.py +
    641
    +642
    +643
    +644
    +645
    +646
    +647
    +648
    +649
    +650
    +651
    +652
    +653
    +654
    +655
    +656
    +657
    +658
    +659
    +660
    +661
    +662
    +663
    +664
    +665
    +666
    +667
    +668
    +669
    +670
    +671
    +672
    +673
    +674
    +675
    +676
    +677
    +678
    +679
    +680
    +681
    +682
    +683
    +684
    +685
    +686
    +687
    +688
    +689
    +690
    +691
    +692
    +693
    +694
    +695
    +696
    +697
    +698
    +699
    +700
    +701
    +702
    +703
    +704
    +705
    +706
    +707
    def run_job(
    +    self,
    +    service: str,
    +    task: str,
    +    uuid: str = None,
    +    args: list = None,
    +    kwargs: dict = None,
    +    workers: str = "all",
    +    timeout: int = 600,
    +    retry=10,
    +):
    +    """
    +    Run job and return results produced by workers.
    +
    +    :param service: str, service name to send request to
    +    :param task: str, task name to run for given service
    +    :param uuid: (str) Job ID to use
    +    :param args: list, task arguments
    +    :param kwargs: dict, task key-word arguments
    +    :param workers: str or list, worker names to target
    +    :param timeout: overall job timeout in seconds
    +    :param retry: number of times to try and GET job results
    +    """
    +    uuid = uuid or uuid4().hex
    +    start_time = int(time.time())
    +
    +    # POST job to workers
    +    post_result = self.post(service, task, args, kwargs, workers, uuid, timeout)
    +    if post_result["status"] != "200":
    +        log.error(
    +            f"{self.name}:run_job - {service}:{task} POST status "
    +            f"to '{workers}' workers is not 200 - '{post_result}'"
    +        )
    +        return None
    +
    +    remaining_timeout = timeout - (time.time() - start_time)
    +    get_timeout = remaining_timeout / retry
    +
    +    # GET job results
    +    while retry:
    +        get = self.get(
    +            service, task, [], {}, post_result["workers"], uuid, get_timeout
    +        )
    +        if self.exit_event.is_set():
    +            break
    +        elif get["status"] == "300":  # PENDING
    +            retry -= 1
    +            log.debug(
    +                f"{self.name}:run_job - {service}:{task}:{uuid} GET "
    +                f"results pending, keep waiting"
    +            )
    +            continue
    +        elif get["status"] == "408":  # TIMEOUT
    +            retry -= 1
    +            log.debug(
    +                f"{self.name}:run_job - {service}:{task}:{uuid} GET "
    +                f"results {get_timeout}s timeout expired, keep waiting"
    +            )
    +            continue
    +        elif get["status"] in ["200", "202"]:  # OK
    +            return get["results"]
    +        else:
    +            log.error(
    +                f"{self.name}:run_job - {service}:{task}:{uuid} "
    +                f"stopping, GET returned unexpected results - '{get}'"
    +            )
    +            return None
    +
    +
    +
    + +
    + +
    + + +

    + run_job_iter(service, task, uuid=None, args=None, kwargs=None, workers='all', timeout=600) + +¤

    + + +
    + +

Iterator version of run_job that returns job results from workers progressively as they respond, rather than waiting for all workers to finish first. This gives the client an interactive experience, presenting job results to the user as soon as they are available.
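A hedged sketch of consuming the iterator follows; the service and task names are placeholders and client is assumed to be a connected NFPClient instance.

# minimal sketch, assuming `client` is a connected NFPClient instance;
# each yielded item is the responding worker's result dictionary
for worker_result in client.run_job_iter(
    service="example-service",   # placeholder service name
    task="example_task",         # placeholder task name
    workers="all",
    timeout=120,
):
    print(worker_result)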

    + + +

    Parameters:

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Name | Type | Description | Default
    service + str + +
    +

    str, service name to send request to

    +
    +
    + required +
    task + str + +
    +

    str, task name to run for given service

    +
    +
    + required +
    uuid + str + +
    +

    (str) Job ID to use

    +
    +
    + None +
    args + list + +
    +

    list, task arguments

    +
    +
    + None +
    kwargs + dict + +
    +

    dict, task key-word arguments

    +
    +
    + None +
    workers + str + +
    +

    str or list, worker names to target

    +
    +
    + 'all' +
    + +
    + Source code in norfab\core\client.py +
    709
    +710
    +711
    +712
    +713
    +714
    +715
    +716
    +717
    +718
    +719
    +720
    +721
    +722
    +723
    +724
    +725
    +726
    +727
    +728
    +729
    +730
    +731
    +732
    +733
    +734
    +735
    +736
    +737
    +738
    +739
    +740
    +741
    def run_job_iter(
    +    self,
    +    service: str,
    +    task: str,
    +    uuid: str = None,
    +    args: list = None,
    +    kwargs: dict = None,
    +    workers: str = "all",
    +    timeout: int = 600,
    +):
    +    """
+    Iterator version of run_job that returns job results from workers
+    progressively as they respond, rather than waiting for all workers
+    to finish first. This gives the client an interactive experience,
+    presenting job results to the user as soon as they are available.
    +
    +    :param service: str, service name to send request to
    +    :param task: str, task name to run for given service
    +    :param uuid: (str) Job ID to use
    +    :param args: list, task arguments
    +    :param kwargs: dict, task key-word arguments
    +    :param workers: str or list, worker names to target
    +    """
    +    uuid = uuid or uuid4().hex
    +
    +    # POST job to workers
    +    post_result = self.post(service, task, args, kwargs, workers, uuid, timeout)
    +
    +    # GET job results
    +    for result in self.get_iter(
    +        service, task, [], {}, post_result["workers"], uuid, timeout
    +    ):
    +        yield result
    +
    +
    +
    + +
    + + + +
    + +
    + +
    + + +
    + + +

    + event_filename(suuid, events_dir) + +¤

    + + +
    + +

Returns a freshly allocated event filename for the given UUID string

    + +
    + Source code in norfab\core\client.py +
    38
    +39
    +40
    +41
    def event_filename(suuid: str, events_dir: str):
    +    """Returns freshly allocated event filename for given UUID str"""
    +    suuid = suuid.decode("utf-8") if isinstance(suuid, bytes) else suuid
    +    return os.path.join(events_dir, f"{suuid}.json")
    +
    +
    +
    + +
    + +
    + + +

    + recv(client) + +¤

    + + +
    + +

Thread to process messages received from broker.

    + +
    + Source code in norfab\core\client.py +
    44
    +45
    +46
    +47
    +48
    +49
    +50
    +51
    +52
    +53
    +54
    +55
    +56
    +57
    +58
    +59
    +60
    +61
    +62
def recv(client):
+    """Thread to process messages received from broker."""
    +    while not client.exit_event.is_set():
    +        # Poll socket for messages every timeout interval
    +        try:
    +            items = client.poller.poll(1000)
    +        except KeyboardInterrupt:
    +            break  # Interrupted
    +        except:
    +            continue
    +        if items:
    +            msg = client.broker_socket.recv_multipart()
    +            log.debug(f"{client.name} - received '{msg}'")
    +            if msg[2] == NFP.EVENT:
    +                client.event_queue.put(msg)
    +                client.stats_recv_event_from_broker += 1
    +            else:
    +                client.recv_queue.put(msg)
    +                client.stats_recv_from_broker += 1
    +
    +
    +
    + +
    + + + +
    + +
    + +
    + + + + + + + + + + + + + +
    +
    + + + +
    + + + +
    + + + +
    +
    +
    +
    + +
    + + + + + + + + + + \ No newline at end of file diff --git a/norfab_exceptions_reference/index.html b/norfab_exceptions_reference/index.html index 8c675fe..812a4eb 100755 --- a/norfab_exceptions_reference/index.html +++ b/norfab_exceptions_reference/index.html @@ -869,7 +869,7 @@
  • - + @@ -890,7 +890,7 @@
  • - + diff --git a/norfab_getting_started/index.html b/norfab_getting_started/index.html index 9390d74..21948ec 100755 --- a/norfab_getting_started/index.html +++ b/norfab_getting_started/index.html @@ -867,7 +867,7 @@
  • - + @@ -888,7 +888,7 @@
  • - + diff --git a/norfab_installation/index.html b/norfab_installation/index.html index eafa40f..0e79c99 100755 --- a/norfab_installation/index.html +++ b/norfab_installation/index.html @@ -951,7 +951,7 @@
  • - + @@ -972,7 +972,7 @@
  • - + diff --git a/norfab_inventory/index.html b/norfab_inventory/index.html index 09c7b08..314c1f0 100755 --- a/norfab_inventory/index.html +++ b/norfab_inventory/index.html @@ -949,7 +949,7 @@
  • - + @@ -970,7 +970,7 @@
  • - + diff --git a/norfab_inventory_reference/index.html b/norfab_inventory_reference/index.html index 1cee016..643e006 100755 --- a/norfab_inventory_reference/index.html +++ b/norfab_inventory_reference/index.html @@ -15,7 +15,7 @@ - + @@ -869,7 +869,7 @@
  • - + @@ -890,7 +890,7 @@
  • - + diff --git a/norfab_nfapi_reference/index.html b/norfab_nfapi_reference/index.html index b7e5668..de34910 100755 --- a/norfab_nfapi_reference/index.html +++ b/norfab_nfapi_reference/index.html @@ -932,7 +932,7 @@
  • - + @@ -953,7 +953,7 @@
  • - + diff --git a/norfab_python_api_overview/index.html b/norfab_python_api_overview/index.html index fc13bd0..9a229a6 100755 --- a/norfab_python_api_overview/index.html +++ b/norfab_python_api_overview/index.html @@ -874,7 +874,7 @@
  • - + @@ -895,7 +895,7 @@
  • - + diff --git a/norfab_release_notes/index.html b/norfab_release_notes/index.html index 7daa4a6..2e1de6f 100755 --- a/norfab_release_notes/index.html +++ b/norfab_release_notes/index.html @@ -867,7 +867,7 @@
  • - + @@ -888,7 +888,7 @@
  • - + diff --git a/norfab_rest_api_overview/index.html b/norfab_rest_api_overview/index.html index 1dedc10..f2c0dac 100755 --- a/norfab_rest_api_overview/index.html +++ b/norfab_rest_api_overview/index.html @@ -867,7 +867,7 @@
  • - + @@ -888,7 +888,7 @@
  • - + diff --git a/norfab_worker_reference/index.html b/norfab_worker_reference/index.html new file mode 100755 index 0000000..15732b7 --- /dev/null +++ b/norfab_worker_reference/index.html @@ -0,0 +1,2743 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + Worker - NORFAB Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    Worker

    + +
    + + + + +
    + +

    CUDOS¤

    +

    Inspired by Majordomo Protocol Worker API, ZeroMQ, Python version.

    +

    Original MDP/Worker spec

    +

    Location: http://rfc.zeromq.org/spec:7.

    +

    Author: Min RK benjaminrk@gmail.com

    +

    Based on Java example by Arkadiusz Orzechowski


    + WorkerWatchDog(worker) + +¤

    + + +
    +

    + Bases: Thread

    + + +

    Class to monitor worker performance

    + +
    + Source code in norfab\core\worker.py +
    def __init__(self, worker):
    +    super().__init__()
    +    self.worker = worker
    +    self.worker_process = psutil.Process(os.getpid())
    +
    +    # extract inventory attributes
    +    self.watchdog_interval = worker.inventory.get("watchdog_interval", 30)
    +    self.memory_threshold_mbyte = worker.inventory.get(
    +        "memory_threshold_mbyte", 1000
    +    )
    +    self.memory_threshold_action = worker.inventory.get(
    +        "memory_threshold_action", "log"
    +    )
    +
    +    # initiate variables
    +    self.runs = 0
    +    self.watchdog_tasks = []
    +
    +
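As the constructor above shows, watchdog behaviour is driven by three optional worker inventory keys; a sketch of those settings as a Python dict with the defaults used in the code (key names come from the source, comments are editorial):

    watchdog_settings = {
        "watchdog_interval": 30,           # how often the watchdog runs its checks
        "memory_threshold_mbyte": 1000,    # RAM usage threshold in MBytes
        "memory_threshold_action": "log",  # what to do when the threshold is exceeded ("log" is the default)
    }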
    + + +

    + get_ram_usage() + +¤

    + + +
    + +

    Return RAM usage in Mbyte

    + +
    + Source code in norfab\core\worker.py +
    def get_ram_usage(self):
    +    """Return RAM usage in Mbyte"""
    +    return self.worker_process.memory_info().rss / 1024000
    +
    +
    +
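A sketch of how this reading pairs with the threshold loaded in the constructor (self is a WorkerWatchDog instance; the check and message are illustrative, not the actual watchdog logic):

    if self.get_ram_usage() > self.memory_threshold_mbyte:
        log.warning(f"{self.worker.name} - memory threshold exceeded")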

    + Result(result=None, failed=False, errors=None, task=None, messages=None) + +¤

    + + +
    + + +

    Result of running individual tasks.

    +

    Attributes/Arguments:

    + + +

    Parameters:

Name              Type                  Description                                                            Default
changed                                 True if the task is changing the system                                required
result            Any                   Result of the task execution, see task's documentation for details     None
failed            bool                  Whether the execution failed or not                                    False
severity_level    logging.LEVEL         Severity level associated to the result of the execution               required
errors            Optional[List[str]]   Exception thrown during the execution of the task (if any)             None
task              str                   Task function name that produced the results                           None
    + Source code in norfab\core\worker.py +
    def __init__(
    +    self,
    +    result: Any = None,
    +    failed: bool = False,
    +    errors: Optional[List[str]] = None,
    +    task: str = None,
    +    messages: Optional[List[str]] = None,
    +) -> None:
    +    self.task = task
    +    self.result = result
    +    self.failed = failed
    +    self.errors = errors or []
    +    self.messages = messages or []
    +
    +

    + dictionary() + +¤

    + + +
    + +

    Method to serialize result as a dictionary

    + +
    + Source code in norfab\core\worker.py +
    def dictionary(self):
    +    """Method to serialize result as a dictionary"""
    +    if not isinstance(self.errors, list):
    +        self.errors = [self.errors]
    +    if not isinstance(self.messages, list):
    +        self.messages = [self.messages]
    +
    +    return {
    +        "task": self.task,
    +        "failed": self.failed,
    +        "errors": self.errors,
    +        "result": self.result,
    +        "messages": self.messages,
    +    }
    +
    +
    +
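A minimal usage sketch of Result together with its dictionary() serializer shown below (task name and values are illustrative):

    res = Result(task="nornir-worker-1:cli", result={"ceos1": "Arista cEOS"}, errors=[])
    res.messages.append("completed")
    print(res.dictionary())
    # {'task': 'nornir-worker-1:cli', 'failed': False, 'errors': [],
    #  'result': {'ceos1': 'Arista cEOS'}, 'messages': ['completed']}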

    + NFPWorker(broker, service, name, exit_event, log_level='WARNING', multiplier=6, keepalive=2500) + +¤

    + + +
    + + + + + +

    Parameters:

Name          Type    Description                                                           Default
broker        str     broker endpoint, e.g. tcp://127.0.0.1:5555                            required
service       str     service name                                                          required
name          str     worker name                                                           required
exit_event            threading event object; if set, signals the worker to stop            required
multiplier    int     number of keepalives lost before considering the other party dead     6
keepalive     int     keepalive interval in milliseconds                                    2500
    + +
    + Source code in norfab\core\worker.py +
    def __init__(
    +    self,
    +    broker: str,
    +    service: str,
    +    name: str,
    +    exit_event,
    +    log_level: str = "WARNING",
    +    multiplier: int = 6,
    +    keepalive: int = 2500,
    +):
    +    log.setLevel(log_level.upper())
    +    self.log_level = log_level
    +    self.broker = broker
    +    self.service = service
    +    self.name = name
    +    self.exit_event = exit_event
    +    self.broker_socket = None
    +    self.socket_lock = (
    +        threading.Lock()
    +    )  # used for keepalives to protect socket object
    +    self.base_dir = f"__norfab__/files/worker/{self.name}/"
    +    self.base_dir_jobs = os.path.join(self.base_dir, "jobs")
    +
    +    self.ctx = zmq.Context()
    +    self.poller = zmq.Poller()
    +    self.reconnect_to_broker()
    +
    +    self.destroy_event = threading.Event()
    +    self.request_thread = None
    +    self.reply_thread = None
    +    self.close_thread = None
    +    self.recv_thread = None
    +    self.event_thread = None
    +
    +    self.post_queue = queue.Queue(maxsize=0)
    +    self.get_queue = queue.Queue(maxsize=0)
    +    self.delete_queue = queue.Queue(maxsize=0)
    +    self.event_queue = queue.Queue(maxsize=0)
    +
    +    # create queue file
    +    os.makedirs(self.base_dir, exist_ok=True)
    +    os.makedirs(self.base_dir_jobs, exist_ok=True)
    +    self.queue_filename = os.path.join(self.base_dir_jobs, f"{self.name}.queue.txt")
    +    if not os.path.exists(self.queue_filename):
    +        with open(self.queue_filename, "w") as f:
    +            pass
    +    self.queue_done_filename = os.path.join(
    +        self.base_dir_jobs, f"{self.name}.queue.done.txt"
    +    )
    +    if not os.path.exists(self.queue_done_filename):
    +        with open(self.queue_done_filename, "w") as f:
    +            pass
    +
    +    self.keepaliver = KeepAliver(
    +        address=None,
    +        socket=self.broker_socket,
    +        multiplier=multiplier,
    +        keepalive=keepalive,
    +        exit_event=self.destroy_event,
    +        service=self.service,
    +        whoami=NFP.WORKER,
    +        name=self.name,
    +        socket_lock=self.socket_lock,
    +        log_level=self.log_level,
    +    )
    +    self.keepaliver.start()
    +    self.client = NFPClient(
    +        self.broker, name=f"{self.name}-NFPClient", exit_event=self.exit_event
    +    )
    +
    +
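A minimal instantiation sketch, assuming a broker is already listening on the endpoint below (service and worker names are illustrative):

    import threading

    exit_event = threading.Event()
    worker = NFPWorker(
        broker="tcp://127.0.0.1:5555",
        service="nornir",
        name="nornir-worker-1",
        exit_event=exit_event,
    )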

    + reconnect_to_broker() + +¤

    + + +
    + +

    Connect or reconnect to broker

    + +
    + Source code in norfab\core\worker.py +
    def reconnect_to_broker(self):
    +    """Connect or reconnect to broker"""
    +    if self.broker_socket:
    +        self.send_to_broker(NFP.DISCONNECT)
    +        self.poller.unregister(self.broker_socket)
    +        self.broker_socket.close()
    +
    +    self.broker_socket = self.ctx.socket(zmq.DEALER)
    +    self.broker_socket.setsockopt_unicode(zmq.IDENTITY, self.name, "utf8")
    +    self.broker_socket.linger = 0
    +    self.broker_socket.connect(self.broker)
    +    self.poller.register(self.broker_socket, zmq.POLLIN)
    +
    +    # Register service with broker
    +    self.send_to_broker(NFP.READY)
    +
    +    log.info(
    +        f"{self.name} - registered to broker at '{self.broker}', service '{self.service}'"
    +    )
    +
    +
    +
    + +
    + +
    + + +

    + send_to_broker(command, msg=None) + +¤

    + + +
    + +

    Send message to broker.

    +

    If no msg is provided, creates one internally

    + +
    + Source code in norfab\core\worker.py +
    def send_to_broker(self, command, msg: list = None):
    +    """Send message to broker.
    +
    +    If no msg is provided, creates one internally
    +    """
    +    if command == NFP.READY:
    +        msg = [b"", NFP.WORKER, NFP.READY, self.service]
    +    elif command == NFP.DISCONNECT:
    +        msg = [b"", NFP.WORKER, NFP.DISCONNECT, self.service]
    +    elif command == NFP.RESPONSE:
    +        msg = [b"", NFP.WORKER, NFP.RESPONSE] + msg
    +    elif command == NFP.EVENT:
    +        msg = [b"", NFP.WORKER, NFP.EVENT] + msg
    +    else:
    +        log.error(
    +            f"{self.name} - cannot send '{command}' to broker, command unsupported"
    +        )
    +        return
    +
    +    log.debug(f"{self.name} - sending '{msg}'")
    +
    +    with self.socket_lock:
    +        self.broker_socket.send_multipart(msg)
    +
    +
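For illustration, the frames produced by the branches above (worker is an NFPWorker instance; client_address and reply_payload are placeholder variables):

    worker.send_to_broker(NFP.READY)
    # wire frames: [b"", NFP.WORKER, NFP.READY, worker.service]

    worker.send_to_broker(NFP.RESPONSE, msg=[client_address, b"", reply_payload])
    # wire frames: [b"", NFP.WORKER, NFP.RESPONSE, client_address, b"", reply_payload]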
    +
    + +
    + +
    + + +

    + load_inventory() + +¤

    + + +
    + +

    Function to load inventory from broker for this worker name.

    + +
    + Source code in norfab\core\worker.py +
    def load_inventory(self):
    +    """
    +    Function to load inventory from broker for this worker name.
    +    """
    +    inventory_data = self.client.get(
    +        "sid.service.broker", "get_inventory", kwargs={"name": self.name}
    +    )
    +
    +    log.debug(f"{self.name} - worker received invenotry data {inventory_data}")
    +
    +    if inventory_data["results"]:
    +        return json.loads(inventory_data["results"])
    +    else:
    +        return {}
    +
    +
    +
    + +
    + +
    + + +

    + fetch_file(url, raise_on_fail=False, read=True) + +¤

    + + +
    + +

    Function to download file from broker File Sharing Service

    + + +

    Parameters:

Name             Type    Description                                                                         Default
url              str     file location string in nf://<filepath> format                                      required
raise_on_fail    bool    raise FileNotFoundError if the download fails                                       False
read             bool    if True, return the file content; otherwise return the OS path to the saved file    True
    + +
    + Source code in norfab\core\worker.py +
    def fetch_file(
    +    self, url: str, raise_on_fail: bool = False, read: bool = True
    +) -> str:
    +    """
    +    Function to download file from broker File Sharing Service
    +
    +    :param url: file location string in ``nf://<filepath>`` format
+    :param raise_on_fail: raise FileNotFoundError if download fails
    +    :param read: if True returns file content, return OS path to saved file otherwise
    +    """
    +    status, file_content = self.client.fetch_file(url=url, read=read)
    +    msg = f"{self.name} - worker '{url}' fetch file failed with status '{status}'"
    +
    +    if status == "200":
    +        return file_content
    +    elif raise_on_fail is True:
    +        raise FileNotFoundError(msg)
    +    else:
    +        log.error(msg)
    +        return None
    +
    +
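A usage sketch (the nf:// path is illustrative); per the code above, a failed download returns None unless raise_on_fail=True:

    # return the file content fetched via the broker File Sharing Service
    content = self.fetch_file("nf://templates/ceos_base.txt")

    # or only download it and return the OS path to the locally saved copy
    local_path = self.fetch_file("nf://templates/ceos_base.txt", read=False)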
    +
    + +
    + +
    + + +

    + fetch_jinja2(url) + +¤

    + + +
    + +

Helper function to recursively download a Jinja2 template together with other templates referenced using "include" statements

    + + +

    Parameters:

Name    Type    Description                                        Default
url     str     nf://file/path like URL to download the file       required
    + +
    + Source code in norfab\core\worker.py +
    def fetch_jinja2(self, url: str) -> str:
    +    """
    +    Helper function to recursively download Jinja2 template together with
    +    other templates referenced using "include" statements
    +
    +    :param url: ``nf://file/path`` like URL to download file
    +    """
    +    filepath = self.fetch_file(url, read=False)
    +    if filepath is None:
    +        msg = f"{self.name} - file download failed '{url}'"
    +        raise FileNotFoundError(msg)
    +
    +    # download Jinja2 template "include"-ed files
    +    content = self.fetch_file(url, read=True)
    +    j2env = Environment(loader="BaseLoader")
    +    try:
    +        parsed_content = j2env.parse(content)
    +    except Exception as e:
    +        msg = f"{self.name} - Jinja2 template parsing failed '{url}', error: '{e}'"
    +        raise Exception(msg)
    +
    +    # run recursion on include statements
    +    for node in parsed_content.find_all(Include):
    +        include_file = node.template.value
    +        base_path = os.path.split(url)[0]
    +        self.fetch_jinja2(os.path.join(base_path, include_file))
    +
    +    return filepath
    +
    +
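A usage sketch (the template path is illustrative); the method returns the local path of the top-level template, with any include-referenced templates downloaded alongside it:

    template_path = self.fetch_jinja2("nf://templates/interfaces.j2")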
    +

    + request_filename(suuid, base_dir_jobs) + +¤

    + + +
    + +

    Returns freshly allocated request filename for given UUID str

    + +
    + Source code in norfab\core\worker.py +
    def request_filename(suuid: Union[str, bytes], base_dir_jobs: str):
    +    """Returns freshly allocated request filename for given UUID str"""
    +    suuid = suuid.decode("utf-8") if isinstance(suuid, bytes) else suuid
    +    return os.path.join(base_dir_jobs, f"{suuid}.req")
    +
    +
    +
    + +
    + +
    + + +

    + reply_filename(suuid, base_dir_jobs) + +¤

    + + +
    + +

    Returns freshly allocated reply filename for given UUID str

    + +
    + Source code in norfab\core\worker.py +
    def reply_filename(suuid: Union[str, bytes], base_dir_jobs: str):
    +    """Returns freshly allocated reply filename for given UUID str"""
    +    suuid = suuid.decode("utf-8") if isinstance(suuid, bytes) else suuid
    +    return os.path.join(base_dir_jobs, f"{suuid}.rep")
    +
    +
    +
    + +
    + +
    + + +

    + recv(worker, destroy_event) + +¤

    + + +
    + +

Thread to process messages received from the broker.

    + +
    + Source code in norfab\core\worker.py +
    def recv(worker, destroy_event):
    +    """Thread to process receive messages from broker."""
    +    while not destroy_event.is_set():
    +        # Poll socket for messages every second
    +        try:
    +            items = worker.poller.poll(1000)
    +        except KeyboardInterrupt:
    +            break  # Interrupted
    +        if items:
    +            msg = worker.broker_socket.recv_multipart()
    +            log.debug(f"{worker.name} - received '{msg}'")
    +            empty = msg.pop(0)
    +            header = msg.pop(0)
    +            command = msg.pop(0)
    +
    +            if command == NFP.POST:
    +                worker.post_queue.put(msg)
    +            elif command == NFP.DELETE:
    +                worker.delete_queue.put(msg)
    +            elif command == NFP.GET:
    +                worker.get_queue.put(msg)
    +            elif command == NFP.KEEPALIVE:
    +                worker.keepaliver.received_heartbeat([header] + msg)
    +            elif command == NFP.DISCONNECT:
    +                worker.reconnect_to_broker()
    +            else:
    +                log.debug(
    +                    f"{worker.name} - invalid input, header '{header}', command '{command}', message '{msg}'"
    +                )
    +
    +        if not worker.keepaliver.is_alive():
    +            log.warning(f"{worker.name} - '{worker.broker}' broker keepalive expired")
    +            worker.reconnect_to_broker()
    +
    +
    +
    + + + + + + + + + + \ No newline at end of file diff --git a/nornir_service/index.html b/nornir_service/index.html index bd18345..6447fcd 100755 --- a/nornir_service/index.html +++ b/nornir_service/index.html @@ -874,7 +874,7 @@
  • - + @@ -895,7 +895,7 @@
  • - + diff --git a/nornir_worker_api_reference/index.html b/nornir_worker_api_reference/index.html index 4d6a004..11e6905 100755 --- a/nornir_worker_api_reference/index.html +++ b/nornir_worker_api_reference/index.html @@ -867,7 +867,7 @@
  • - + @@ -888,7 +888,7 @@
  • - + @@ -1594,7 +1594,7 @@

    - Bases: WorkerWatchDog

    + Bases:
    WorkerWatchDog

    Class to monitor Nornir worker performance

    @@ -1952,7 +1952,7 @@

    - Bases: NFPWorker

    + Bases: NFPWorker

    diff --git a/objects.inv b/objects.inv index b1b610287199fc8ee339b1d9a2ef9ac3664b5bca..de0e388b514e32528beb1b99656a41b4f747d7cc 100755 GIT binary patch delta 875 zcmV-x1C;#l1?LBliGRy(+b|GC_x%b3?Y1hKWtUBBpqn6QP-IEa)X1hok<5@%SY}ELu&2RDan%?Pqg_F+IO$GaEwZLoH9b5YE%tz`9EO(q&jpgS-7$?>dz1{&z?_ zB)tcm!HyNXy0m<}f8RVO^V*i5fwWH{Ne01;SuoL_qcZ`y22aUi()VrY2|#NcU3Mr+ zmZ8bp{X)kDVYCL}t20f_E;QSk6*Z4`l{!Bo89P+xsej($n&)%3d03iqDgxdF*y<&h z{L!wk)}V*ce$`9 z-$d0}Xye;K50{@h@@7=dUtqBQkHb07aVn^37Y0jp6EX&JwWLf&Zl&JxJ2I#;Jl$^v zGVIOfJ%53Hi!a`c{SB^Ae$JXVgm9;;Q{b~Fu^HuabwB)J)^jpDCe>{O@|+cpujlT{ z{t7SxVb$(RMH&HnErL7E!L^sNM1x+@@Ro$GO134kSGf2&o1M1vtFw-Zhu(y8F7VDsJflVB2tGwH{;QDG`s~d z8(GAlme^kd3DuC>&8vzhFi^~=o~;w)m~5`~08>-C#2uH{`R$i(4F)F5 zDfiI20t-i$*yv8%r*T7U-#*u9j$}8W`q#zgd>?g7=DpNJWb5;efefx>JotfwZH)8o zzho#4+!rFKl_9j^N7Jf3uKIiv&I*qwqgQO8s1fRbuO7z}upAx4!-tpBz-an8<{`H# zcv!@p?YWCMTOKZbY@I>xxf&7>lY=&u2?bn7N&HCW1;BD8ONcMk6vMGP{{TlD+@?X{`8#~%B!QZHyFRP8a!BpwK4;;fUQ@b8=X zS_U!DrIYl=49@d9j_nvMQYi{1jRqS$b6OhUegnqv?j3v723A0OE#Vthm9WY$-0_Pv z!u!SNg)O69Z_9<#oS*~(E8)}o4~viQ>|X_zNROgeHLtRPnSZPlcLWB}aohuuR?w)G zwUt4my+aTQSV6c0V#~*;ukEKZ$6GtzncM?m3Je6`zma<6*t;229|(`>c-lJ=@sY2f>3? zJYec08?dRoU4NaTIioQ>zj`wplCBT+Jnf^oP_u#UE%DRt<7pxtPkVhBPb15%h;S;^GV<}Qhok-84-4$>zi zR?u`P0$G#SdoWj$<4sgs#v!2J^zGzrE#Hi)#13wDkAM4cJ_z~;P~XLwg;3z3Z#!(+ zK222p*ox1gvNZ%QHOX3si#sH-3R(*-JF}32rpz*nXd8#5Qe@Y$aEM|qvj~#abgILQ zY9!p|#pDy9*ctNCYp3E2+h-V$$3MSly3h||?bGBnf3liIWwKmfwx?upqw~Q>nYuwA z{C`opT51zn)Z$NJ-|(|(85D|dvEVU}MLro@@Egr5p$>%Rbv^+n$U{DST%;mntL;`i z@Bo!FvA<-@>>zx`XoZR9JB$9@{Il^L*XerUKxCtFB#wU|Y!=7QWOe{fm^cXMm+CI6 Ldv*Q*F`%d-^)NN3 diff --git a/search/search_index.json b/search/search_index.json index ad0ef20..1131fe0 100755 --- a/search/search_index.json +++ b/search/search_index.json @@ -1 +1 @@ -{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Overview","text":"

    Through lifting others we rise

    "},{"location":"#network-automations-fabric","title":"Network Automations Fabric","text":"

Hi, thank you for getting here.

    "},{"location":"#why-the-story","title":"Why (The Story)","text":"

    In a world devoid of network automations, the streets were silent and grey. Without network automations, network engineers' lives became a grueling cycle of manual configurations and endless troubleshooting. They spent hours accessing each and every device, manually configuring and patching systems. Nights were sleepless, filled with frantic calls to resolve outages that could no longer be preemptively detected or resolved. Collaboration was slow, relying on face-to-face meetings and manual documentation. Overwhelmed and exhausted, their innovative spirit was stifled by the sheer volume of repetitive tasks...

    Let us introduce you to the world of Network Automations Fabric.

    "},{"location":"#what-the-idea","title":"What (The Idea)","text":"

NorFab's purpose is to augment engineers' capabilities with automation superpowers.

    Most of the software to manage networks falls into one of the two categories:

    • heavyweight platforms running on dedicated infrastructure
    • lightweight scripts or tools developed and run locally

NorFab can be both - software you can run equally well from your laptop or on a server, centralized or fully distributed, lightweight and feature rich. It is capable of handling any use case without the need to throw gazillions of dollars and man hours at it, and it is always ready to serve the purpose of unlocking engineers' superpowers, managing modern networks and making engineers' lives better.

    "},{"location":"#how-the-features","title":"How (The Features)","text":"
    • Run Anywhere - locally on Windows, MAC or Linux, in a container, on a VM, in the cloud, centralized or distributed
    • Extend Anything - extendability is in the core of NorFab
    • Integrate with Everything - Python API, REST API, CLI northbound interfaces
    • Manage Anything - develop your own services or use built-in to manage your network infrastructure
    • Model and data driven - Pydantic models for API, validation and documentation
• Automate Anything - we mean it, the sky is the limit on what you can do with NorFab automating your networks
    "},{"location":"#architecture","title":"Architecture","text":"

    Key actors of the system include

    • WORKERS - form services, processes that run anywhere and act as resource proxy agents
    • CLIENTS - consume services, processes that run on client machine and connect to broker
    • BROKER - provides access to services for clients
    • RESOURCES - entities managed by workers, e.g. network devices, databases, file system
    • SERVICES - a collection of workers and managed resources

Clients communicate with the broker to submit jobs, the broker distributes jobs across the workers comprising the service, and workers run the jobs, producing results that are later retrieved by clients. In other words, Services are hosted by Workers and expose functionality consumed by Clients via the Broker.

    "},{"location":"NFP/","title":"NORFAB Protocol","text":"

    Status: experimental Editor: d.mulyalin@gmail.com Contributors:

The NORFAB Protocol (NFP) defines a reliable service-oriented request-reply dialog between a set of client applications, a broker and a set of worker applications representing a service that manages a set of resources.

NFP covers presence, heartbeating, and service-resource-oriented request-reply processing. NFP originated from the MDP pattern defined in Chapter 4 of the ZeroMQ Guide, combined with the TSP pattern approach (developed in the same chapter) to persistent messaging across a network of arbitrarily connected clients and workers, as a design for disk-based reliable messaging. NORFAB allows clients and workers to work without being connected to the network at the same time, and defines handshaking for safe storage of requests and retrieval of replies.

    "},{"location":"NFP/#license","title":"License","text":"

    Copyright (c) 2024 Denis Mulyalin.

    This Specification is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.

    This Specification is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along with this program; if not, see http://www.gnu.org/licenses.

    "},{"location":"NFP/#change-process","title":"Change Process","text":"

    This Specification is a free and open standard (see \u201cDefinition of a Free and Open Standard\") and is governed by the Digital Standards Organization\u2019s Consensus-Oriented Specification System (COSS) (see \u201cConsensus Oriented Specification System\").

    "},{"location":"NFP/#language","title":"Language","text":"

    The key words \u201cMUST\u201d, \u201cMUST NOT\u201d, \u201cREQUIRED\u201d, \u201cSHALL\u201d, \u201cSHALL NOT\u201d, \u201cSHOULD\u201d, \u201cSHOULD NOT\u201d, \u201cRECOMMENDED\u201d, \u201cMAY\u201d, and \u201cOPTIONAL\u201d in this document are to be interpreted as described in RFC 2119 (see \u201cKey words for use in RFCs to Indicate Requirement Levels\").

    "},{"location":"NFP/#goals","title":"Goals","text":"

    The NORFAB Protocol (NFP) defines a reliable service-resource-oriented request-reply dialog between a set of client applications, a broker and a set of worker applications. NFP covers presence, heartbeating, and service-oriented request-reply processing.

NFP uses name-based service resolution, name-based resource targeting and structured protocol commands.

    The goals of NFP are to:

    • Allow requests to be routed to workers on the basis of abstract service names.
    • Allow broker and workers to detect disconnection of one another, through the use of heartbeating.
• Allow task distribution by clients targeting all (broadcast), any (anycast) or specific (unicast) workers by name within a given service.
    • Allow the broker to recover from dead or disconnected workers by re-sending requests to other workers.
    • Allow workers to manage resource entities, where entities can be dynamically distributed across all workers within the service.
    • Allow workers to have access to inventory data hosted by broker
    "},{"location":"NFP/#architecture","title":"Architecture","text":""},{"location":"NFP/#overall-topology","title":"Overall Topology","text":"

    NFP connects a set of client applications, a single broker device and a pool of workers applications. Clients connect to the broker, as do workers. Clients and workers do not see each other, and both can come and go arbitrarily. The broker MAY open two sockets (ports), one front-end for clients, and one back-end for workers. However NFP is also designed to work over a single broker socket.

    We define \u2018client\u2019 applications as those issuing requests, and \u2018worker\u2019 applications as those processing them. NFP makes these assumptions:

    • Workers are idempotent, i.e. it is safe to execute the same request more than once.
• Workers will handle at most one request at a time, and will issue exactly one reply for each successful request.
• The NORFAB broker mediates requests on a per-service basis. The broker SHOULD serve clients on a fair basis and SHOULD deliver requests to workers on the basis of the targeting specified by the client - any worker, all workers or a unicast worker identified by name.

    NFP consists of four sub-protocols:

    • NFP/Client, which covers how the NFP broker communicates with client applications.
    • NFP/Worker, which covers how the NFP broker communicates with workers applications.
    • NFP/Worker-PUB, which covers how broker subscribes to events published by workers.
    • NFP/Broker-PUB, which covers how broker publishes collected worker events to clients.

    The broker SHOULD be an intermediary (a device) application that mediates Client-Workers communication. The broker SHOULD integrate Management Interface (MMI) service directly into it together with simple disk based Inventory service for workers.

    "},{"location":"NFP/#router-addressing","title":"ROUTER Addressing","text":"

    The broker MUST use a ROUTER socket to accept requests from clients, and connections from workers. The broker MAY use a separate socket for each sub-protocol, or MAY use a single socket for both sub-protocols.

    From the \u00d8MQ Reference Manual:

    When receiving messages a ROUTER socket shall prepend a message part containing the identity of the originating peer to the message before passing it to the application. When sending messages a ROUTER socket shall remove the first part of the message and use it to determine the identity of the peer the message shall be routed to.

    This extra frame is not shown in the sub-protocol commands explained below.

    "},{"location":"NFP/#nfp-messages","title":"NFP messages","text":""},{"location":"NFP/#open","title":"OPEN","text":"

    A OPEN command consists of 4 frames, formatted on the wire as follows:

    OPEN command\n---------------------------------------------------------------\nFrame 0: Empty frame\nFrame 1: \u201cNFPC01\u201d or \u201cNFPW01\u201d or \u201cNFPB01\u201d (six bytes, representing NFP/Client or NFP/Worker or NFP/Broker v0.1)\nFrame 2: 0x00 (one byte, representing OPEN)\nFrame 3: Open body (opaque binary)\n

Workers and clients use the OPEN message to introduce themselves to the broker and negotiate connection parameters. The broker sends an OPEN message back to the client or worker to confirm the connection.

    "},{"location":"NFP/#ready","title":"READY","text":"

    A READY command consists of a multipart message of 4 frames, formatted on the wire as follows:

    READY command\n---------------------------------------------------------------\nFrame 0: Empty frame\nFrame 1: \u201cNFPW01\u201d (six bytes, representing NFP/Worker v0.1)\nFrame 2: 0x01 (one byte, representing READY)\nFrame 3: Service name (printable string)\n

    Worker sends READY command to broker, broker accepts ready request and registers worker with a service.

    "},{"location":"NFP/#keepalive","title":"KEEPALIVE","text":"

    A KEEPALIVE command consists of 4 frames, formatted on the wire as follows:

    KEEPALIVE command\n---------------------------------------------------------------\nFrame 0: Empty frame\nFrame 1: \u201cNFPB01\u201d or \u201cNFPW01\u201d (six bytes, representing NFP/Broker or NFP/Worker v0.1)\nFrame 2: 0x02 (one byte, representing KEEPALIVE)\nFrame 3: Service name (printable string)\n

    Broker sends KEEPALIVE messages to workers to indicate broker is still alive.

    Workers send KEEPALIVE messages to broker to indicate worker is still alive.

    "},{"location":"NFP/#disconnect","title":"DISCONNECT","text":"

    A DISCONNECT command consists of 3 frames, formatted on the wire as follows:

    DISCONNECT command\n---------------------------------------------------------------\nFrame 0: Empty frame\nFrame 1: \u201cNFPB01\u201d or \u201cNFPW01\u201d (six bytes, representing NFP/Broker or NFP/Worker v0.1)\nFrame 2: 0x03 (one byte, representing DISCONNECT)\nFrame 3: Service name (printable string)\nFrame 4: Disconnect body (opaque binary)\n

    Broker sends DISCONNECT command to workers to signal the request to disconnect.

    Workers also can send DISCONNECT command to broker to signal the request to disconnect.

    "},{"location":"NFP/#post","title":"POST","text":"

    A POST command consists of 7 or more frames, formatted on the wire as follows:

    POST command\n---------------------------------------------------------------\nFrame 0: Empty (zero bytes, invisible to REQ application)\nFrame 1: \u201cNFPC01\u201d or \"NFPB01\" (six bytes, representing NFP/Client or NFP/Broker v0.1)\nFrame 2: 0x04 (one byte, representing POST)\nFrame 3: Service name (printable string)\nFrame 4: Target (printable string) workers, `all` (default), `any` or comma separated `worker names`\nFrame 5: Job UUID (printable string)\nFrames 6: POST body (opaque binary)\n

    Client sends POST message to broker to distribute job requests among workers.

    Broker relays POST message to individual workers to publish job request.

    "},{"location":"NFP/#response","title":"RESPONSE","text":"

    A RESPONSE command consists of 7 or more frames, formatted on the wire as follows:

    RESPONSE command\n---------------------------------------------------------------\nFrame 0: Empty (zero bytes, invisible to REQ application)\nFrame 1: \u201cNFPB01\u201d or \u201cNFPW01\u201d (six bytes, representing NFP/Broker or NFP/Worker v0.1)\nFrame 2: 0x05 (one byte, representing RESPONSE)\nFrame 3: Service name (printable string)\nFrame 4: Job UUID (printable string)\nFrame 5: Status code (explained below)\nFrames 6: Response body (opaque binary)\n

    Worker sends RESPONSE message to broker with requests status or job results.

    Broker relays RESPONSE message to client.

    "},{"location":"NFP/#get","title":"GET","text":"

    A GET command consists of 7 or more frames, formatted on the wire as follows:

    GET command\n---------------------------------------------------------------\nFrame 0: Empty (zero bytes, invisible to REQ application)\nFrame 1: \u201cNFPC01\u201d or \"NFPB01\" (six bytes, representing NFP/Client or NFP/Broker v0.1)\nFrame 2: 0x06 (one byte, representing GET)\nFrame 3: Service name (printable string)\nFrame 4: Target (printable string) workers, `all` (default), `any` or comma separated `worker names`\nFrame 5: Job UUID (printable string)\nFrames 6: GET request body (opaque binary)\n

    Client sends GET message to broker to retrieve job results.

Broker relays GET message to individual workers to request job results.

    "},{"location":"NFP/#delete","title":"DELETE","text":"

    A DELETE command consists of 7 or more frames, formatted on the wire as follows:

    DELETE command\n---------------------------------------------------------------\nFrame 0: Empty (zero bytes, invisible to REQ application)\nFrame 1: \u201cNFPC01\u201d or \"NFPB01\" (six bytes, representing NFP/Client or NFP/Broker v0.1)\nFrame 2: 0x07 (one byte, representing POST)\nFrame 3: Service name (printable string)\nFrame 4: Target (printable string) workers, `all` (default), `any` or comma separated `worker names`\nFrame 5: Job UUID (printable string)\nFrames 6: DELETE body (opaque binary)\n

    Client sends DELETE message to broker to distribute job delete requests to workers.

    Broker relays DELETE message to individual workers to cancel the job.

    "},{"location":"NFP/#event","title":"EVENT","text":"

    A EVENT command consists of 7 or more frames, formatted on the wire as follows:

    EVENT command\n---------------------------------------------------------------\nFrame 0: Empty (zero bytes, invisible to REQ application)\nFrame 1: \u201cNFPW01\u201d (six bytes, representing NFP/Worker v0.1)\nFrame 2: 0x08 (one byte, representing EVENT)\nFrame 3: Service name (printable string)\nFrame 4: Topic (printable string e.g. Job UUID)\nFrame 5: Status code 200 (explained below)\nFrames 6: Event body (opaque binary)\n

    Worker sends EVENT message to Broker to supply information about job execution.

    Broker relays EVENT message to certain Client.

    "},{"location":"NFP/#status-frames","title":"Status Frames","text":"

    Every RESPONSE message contains a status frame followed by zero or more content frames. The status frame contains a string formatted as three digits, optionally followed by a space and descriptive text. A client MUST NOT treat the text as significant in any way. Implementations MAY NOT use status codes that are not defined here:

    200 - OK. The NORFAB worker executed the request successfully. 202 - ACCEPTED. The NORFAB Broker accepted POST request to dispatch the job. 300 - PENDING. The client SHOULD retry the request at a later time. 400 - UNKNOWN. The client is using an invalid or unknown UUID and SHOULD NOT retry. 408 - REQUEST TIMEOUT. Client did not receive response from broker or worker. 417 - EXPECT FAILED. Client did not receive what it was expecting to receive. 500 - ERROR. The server cannot complete the request due to some internal error. The client SHOULD retry at some later time.

    "},{"location":"NFP/#nfpclient","title":"NFP/Client","text":"

    NFP/Client is a strictly synchronous dialog initiated by the client (where \u2018C\u2019 represents the client, and \u2018B\u2019 represents the broker):

    C: OPEN\nB: OPEN\n\nRepeat:\n\n    C: POST\n    B: RESPONSE\n    ...\n\n    C: GET\n    B: RESPONSE\n    ...\n

    Clients SHOULD use a REQ socket when implementing a synchronous request-reply pattern. The REQ socket will silently create frame 0 for outgoing requests, and remove it for replies before passing them to the calling application.

    Clients MAY use any suitable strategy for recovering from a non-responsive broker. One recommended strategy is:

    • To use polling instead of blocking receives on the request socket.
    • If there is no reply within some timeout, to close the request socket and open a new socket, and resend the request on that new socket.
    • If there is no reply after several retries, to signal the transaction as failed.
    • The service name is a 0MQ string that matches the service name specified by a worker in its READY command (see NFP/Worker below). The broker SHOULD queue client requests for which service no workers has been registered and SHOULD expire these requests after a reasonable and configurable time if no service's workers has been registered.
    "},{"location":"NFP/#nfpbroker","title":"NFP/Broker","text":"

    NFP/Broker is a mediator that receives messages from clients and dispatches them out to workers. In return messages from workers routed to clients.

    "},{"location":"NFP/#nfpworker","title":"NFP/Worker","text":"

    NFP/Worker is a mix of a synchronous request-reply dialog, initiated by the service worker, and an asynchronous heartbeat dialog that operates independently in both directions. This is the synchronous dialog (where \u2018W\u2019 represents the service worker, and \u2018B\u2019 represents the broker):

    W: OPEN\nB: OPEN\nW: READY\n\nRepeat:\n\n    B: POST\n    W: RESPONSE\n    ...\n\n    B: GET\n    W: RESPONSE\n    ... \n

    The asynchronous heartbeat dialog operates on the same sockets and works thus:

    Repeat:                 Repeat:\n\n    W: HEARTBEAT            B: HEARTBEAT\n    ...                     ...\n\nW: DISCONNECT           B: DISCONNECT\n

    NFP/Worker commands all start with an empty frame to allow consistent processing of client and worker frames in a broker, over a single socket. The empty frame has no other significance.

    "},{"location":"NFP/#nfpworker-pub","title":"NFP/Worker-PUB","text":"

    TBD

    "},{"location":"NFP/#nfpbroker-pub","title":"NFP/Broker-PUB","text":"

    TBD

    "},{"location":"NFP/#job-persistence","title":"Job Persistence","text":"

    Workers SHOULD persistently store job requests and job execution results for a configurable amount of time allowing clients (client submitted job request or any other client) to request job execution results on demand.

    Clients SHOULD persistently store job requests and MAY store job execution results locally for a configurable amount of time.

    "},{"location":"NFP/#opening-and-closing-a-connection","title":"Opening and Closing a Connection","text":"

    The worker is responsible for opening and closing a logical connection. One worker MUST connect to exactly one broker using a single \u00d8MQ DEALER (XREQ) socket.

    Since \u00d8MQ automatically reconnects peers after a failure, every NFP command includes the protocol header to allow proper validation of all messages that a peer receives.

    The worker opens the connection to the broker by creating a new socket, connecting it, and then sending a READY command to register to a service. One worker handles precisely one service, and many workers MAY handle the same service. The worker MUST NOT send a further READY.

    There is no response to a READY. The worker SHOULD assume the registration succeeded until or unless it receives a DISCONNECT, or it detects a broker failure through heartbeating.

    The worker MAY send DISCONNECT at any time, including before READY. When the broker receives DISCONNECT from a worker it MUST send no further commands to that worker.

    The broker MAY send DISCONNECT at any time, by definition after it has received at least one command from the worker.

    The broker MUST respond to any valid but unexpected command by sending DISCONNECT and then no further commands to that worker. The broker SHOULD respond to invalid messages by dropping them and treating that peer as invalid.

    When the worker receives DISCONNECT it must send no further commands to the broker; it MUST close its socket, and reconnect to the broker on a new socket. This mechanism allows workers to re-register after a broker failure and recovery.

    "},{"location":"NFP/#post-and-response-processing","title":"POST and RESPONSE Processing","text":"

    The POST and the RESPONSE commands MUST contain precisely one client address frame. This frame MUST be followed by an empty (zero sized) frame.

    The address of each directly connected client is prepended by the ROUTER socket to all request messages coming from clients. That ROUTER socket also expects a client address to be prepended to each reply message sent to a client.

    "},{"location":"NFP/#keepaliving","title":"Keepaliving","text":"

    KEEPALIVE commands are valid at any time, after a READY command.

    Any received command except DISCONNECT acts as a keepalive. Peers SHOULD NOT send KEEPALIVE commands while also sending other commands.

    Both broker and worker MUST send heartbeats at regular and agreed-upon intervals. A peer MUST consider the other peer \u201cdisconnected\u201d if no keepalive arrives within some multiple of that interval (usually 3-5).

    If the worker detects that the broker has disconnected, it SHOULD restart a new conversation.

If the broker detects that the worker has disconnected, it SHOULD stop sending messages of any type to that worker.

    "},{"location":"NFP/#broker-management-interface-bmmi","title":"Broker Management Interface (BMMI)","text":"

    Broker SHOULD implement Management interface as a service endpoint for clients to interact with.

    Broker should use mmi.service.broker service endpoint to listen to client's requests.

    These MMI functions SHOULD be implemented:

    • show_broker - to return broker status and statistics
    • show_workers - to return worker status and statistics
    • show_clients - to return clients statistics
    • show_services - to return services status and statistics
    • restart - restart broker
    • shutdown - shutdown broker completely
    • disconnect - to disconnect all workers
    "},{"location":"NFP/#worker-management-interface-wmmi","title":"Worker Management Interface (WMMI)","text":"

    Worker SHOULD implement Management interface as a service endpoint for clients to interact with.

    Worker should use mmi.service.worker service endpoint to listen to client's requests.

    These MMI functions SHOULD be implemented:

    • show_broker - to return broker status and statistics
    • show_workers - to return worker status and statistics
    • show_clients - to return clients statistics
    • restart - restart worker
    • shutdown - shutdown worker completely
    • disconnect - to disconnect worker from broker and re-establish connection
    "},{"location":"NFP/#broker-simple-inventory-datastore-sid","title":"Broker Simple Inventory Datastore (SID)","text":"

    Broker should implement Inventory Datastore to store and serve configuration to workers as well as arbitrary workers inventory data.

    Broker should use sid.service.broker service endpoint to listen to worker's requests.

    Workers willing to make use of broker's inventory datastore should implement NFP/Client protocol defined above to request inventory data.

    These SID functions SHOULD be implemented:

    • get_inventory - to return inventory content for given worker
    "},{"location":"NFP/#sid-implementation","title":"SID Implementation","text":"

    TBD

    "},{"location":"NFP/#broker-file-sharing-service-fss","title":"Broker File Sharing Service (FSS)","text":"

    Broker implements service to serve files to clients and workers from local file system using nf://<filepath> URL for supported arguments.

    Broker should use fss.service.broker service endpoint to listen to worker's requests.

    "},{"location":"NFP/#fss-implementation","title":"FSS Implementation","text":"

    TBD

    "},{"location":"NFP/#reliability","title":"Reliability","text":"

    The NORFAB pattern is designed to extend the basic \u00d8MQ request-reply pattern with the ability to detect and recover from a specific set of failures:

    • Worker applications which crash, run too slowly, or freeze.
    • Worker applications that are disconnected from the network (temporarily or permanently).
    • Client applications that are temporarily disconnected from the network.
    • A queue broker that crashes and is restarted.
    • A queue broker that suffers a permanent failure.
    • Requests or replies that are lost due to any of these failures.
    • The general approach is to retry and reconnect, using heartbeating when needed.
    "},{"location":"NFP/#scalability-and-performance","title":"Scalability and Performance","text":"

NORFAB is designed to scale to large numbers (thousands) of workers and clients, allowing management of tens of thousands of resource entities, limited only by system resources on the broker. Partitioning of workers by service allows multiple applications to share the same broker infrastructure. Workers manage a set of resources defined by the system administrator. The same resource can be managed by a single worker or by multiple workers; the system imposes no restrictions on how resource entities are distributed across workers.

    Throughput performance for a single client application will be limited to tens of thousands, not millions, of request-reply transactions per second due to round-trip costs and the extra latency of a broker-based approach. The larger the request and reply messages, the more efficient NORFAB will become.

    System requirements for the broker are moderate: no more than one outstanding request per client will be queued, and message contents can be switched between clients and workers without copying or processing. A single broker thread can therefore switch several million messages per second.

    "},{"location":"NFP/#security","title":"Security","text":""},{"location":"NFP/#worker-authentication","title":"Worker Authentication","text":"

    TBD

    "},{"location":"NFP/#worker-authorization","title":"Worker Authorization","text":"

    TBD

    "},{"location":"NFP/#client-authentication","title":"Client Authentication","text":"

    TBD

    "},{"location":"NFP/#client-authorization-role-based-access-control-rbac","title":"Client Authorization - Role Based Access Control (RBAC)","text":"

    TBD

    "},{"location":"NFP/#client-encryption","title":"Client Encryption","text":"

    TBD

    "},{"location":"NFP/#worker-encryption","title":"Worker Encryption","text":"

    TBD

    "},{"location":"NFP/#accounting","title":"Accounting","text":"

    TBD

    "},{"location":"NFP/#known-weaknesses","title":"Known Weaknesses","text":"
    • The heartbeat rate must be set to similar values in broker and worker, or false disconnections will occur.
    • The use of multiple frames for command formatting has a performance impact.
    "},{"location":"netbox_worker_api_reference/","title":"Netbox Worker","text":""},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker--netbox-worker-inventory-reference","title":"Netbox Worker Inventory Reference","text":""},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker--sample-netbox-worker-inventory","title":"Sample Netbox Worker Inventory","text":"
    service: netbox\nbroker_endpoint: \"tcp://127.0.0.1:5555\"\ninstances:\n  prod:\n    default: True\n    url: \"http://192.168.4.130:8000/\"\n    token: \"0123456789abcdef0123456789abcdef01234567\"\n    ssl_verify: False\n  dev:\n    url: \"http://192.168.4.131:8000/\"\n    token: \"0123456789abcdef0123456789abcdef01234567\"\n    ssl_verify: False\n  preprod:\n    url: \"http://192.168.4.132:8000/\"\n    token: \"0123456789abcdef0123456789abcdef01234567\"\n    ssl_verify: False\n
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker--sample-nornir-worker-netbox-inventory","title":"Sample Nornir Worker Netbox Inventory","text":"
    netbox:\n  retry: 3\n  retry_interval: 1\n  instance: prod\n  interfaces:\n    ip_addresses: True\n    inventory_items: True\n  connections:\n    cables: True\n    circuits: True\n  nbdata: True\n  primary_ip: \"ipv4\"\n  devices:\n    - fceos4\n    - fceos5\n    - fceos8\n    - ceos1\n  filters: \n    - q: fceos3\n    - manufacturer: cisco\n      platform: cisco_xr\n
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker","title":"NetboxWorker(broker, service, worker_name, exit_event=None, init_done_event=None, log_level='WARNING')","text":"

    Bases: NFPWorker

    Parameters:

    Name Type Description Default broker

    broker URL to connect to

    required service

name of the service this worker belongs to

    required worker_name

    name of this worker

    required exit_event

if set, worker needs to stop/exit

    None init_done_event

    event to set when worker done initializing

None log_level

    logging level of this worker

    required Source code in norfab\\workers\\netbox_worker.py
    def __init__(\n    self,\n    broker,\n    service,\n    worker_name,\n    exit_event=None,\n    init_done_event=None,\n    log_level=\"WARNING\",\n):\n    super().__init__(broker, service, worker_name, exit_event, log_level)\n    self.init_done_event = init_done_event\n\n    # get inventory from broker\n    self.inventory = self.load_inventory()\n    if not self.inventory:\n        log.critical(\n            f\"{self.name} - Broker {self.broker} returned no inventory for {self.name}, killing myself...\"\n        )\n        self.destroy()\n\n    assert self.inventory.get(\n        \"instances\"\n    ), f\"{self.name} - inventory has no Netbox instances\"\n\n    # find default instance\n    for name, params in self.inventory[\"instances\"].items():\n        if params.get(\"default\") is True:\n            self.default_instance = name\n            break\n    else:\n        self.default_instance = name\n\n    # check Netbox compatibility\n    self._verify_compatibility()\n\n    self.init_done_event.set()\n    log.info(f\"{self.name} - Started\")\n
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.graphql","title":"graphql(instance=None, dry_run=False, obj=None, filters=None, fields=None, queries=None, query_string=None)","text":"

    Function to query Netbox v4 GraphQL API

    Parameters:

    Name Type Description Default instance str

    Netbox instance name

    None dry_run bool

    only return query content, do not run it

    False Source code in norfab\\workers\\netbox_worker.py
    def graphql(\n    self,\n    instance: str = None,\n    dry_run: bool = False,\n    obj: dict = None,\n    filters: dict = None,\n    fields: list = None,\n    queries: dict = None,\n    query_string: str = None,\n) -> Result:\n    \"\"\"\n    Function to query Netbox v4 GraphQL API\n\n    :param instance: Netbox instance name\n    :param dry_run: only return query content, do not run it\n    \"\"\"\n    nb_params = self._get_instance_params(instance)\n    ret = Result(task=f\"{self.name}:graphql\")\n\n    # form graphql query(ies) payload\n    if queries:\n        queries_list = []\n        for alias, query_data in queries.items():\n            query_data[\"alias\"] = alias\n            if self.nb_version[0] == 4:\n                queries_list.append(_form_query_v4(**query_data))\n            elif self.nb_version[0] == 3:\n                queries_list.append(_form_query_v3(**query_data))\n        queries_strings = \"    \".join(queries_list)\n        query = f\"query {{{queries_strings}}}\"\n    elif obj and filters and fields:\n        if self.nb_version[0] == 4:\n            query = _form_query_v4(obj, filters, fields)\n        elif self.nb_version[0] == 3:\n            query = _form_query_v3(obj, filters, fields)\n        query = f\"query {{{query}}}\"\n    elif query_string:\n        query = query_string\n    else:\n        raise RuntimeError(\n            f\"{self.name} - graphql method expects quieries argument or obj, filters, \"\n            f\"fields arguments or query_string argument provided\"\n        )\n    payload = json.dumps({\"query\": query})\n\n    # form and return dry run response\n    if dry_run:\n        ret.result = {\n            \"url\": f\"{nb_params['url']}/graphql/\",\n            \"data\": payload,\n            \"verify\": nb_params.get(\"ssl_verify\", True),\n            \"headers\": {\n                \"Content-Type\": \"application/json\",\n                \"Accept\": \"application/json\",\n                \"Authorization\": f\"Token ...{nb_params['token'][-6:]}\",\n            },\n        }\n        return ret\n\n    # send request to Netbox GraphQL API\n    log.debug(\n        f\"{self.name} - sending GraphQL query '{payload}' to URL '{nb_params['url']}/graphql/'\"\n    )\n    req = requests.post(\n        url=f\"{nb_params['url']}/graphql/\",\n        headers={\n            \"Content-Type\": \"application/json\",\n            \"Accept\": \"application/json\",\n            \"Authorization\": f\"Token {nb_params['token']}\",\n        },\n        data=payload,\n        verify=nb_params.get(\"ssl_verify\", True),\n        timeout=(3, 600),\n    )\n    try:\n        req.raise_for_status()\n    except Exception as e:\n        raise Exception(\n            f\"{self.name} -  Netbox GraphQL query failed, query '{query}', \"\n            f\"URL '{req.url}', status-code '{req.status_code}', reason '{req.reason}', \"\n            f\"response content '{req.text}'\"\n        )\n\n    # return results\n    reply = req.json()\n    if reply.get(\"errors\"):\n        msg = f\"{self.name} - GrapQL query error '{reply['errors']}', query '{payload}'\"\n        log.error(msg)\n        ret.errors.append(msg)\n        if reply.get(\"data\"):\n            ret.result = reply[\"data\"]  # at least return some data\n    elif queries or query_string:\n        ret.result = reply[\"data\"]\n    else:\n        ret.result = reply[\"data\"][obj]\n\n    return ret\n
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.rest","title":"rest(instance=None, method='get', api='', **kwargs)","text":"

    Method to query Netbox REST API.

Parameters:

    instance (str, default None) - Netbox instance name
    method (str, default 'get') - requests method name e.g. get, post, put etc.
    api (str, default '') - api url to query e.g. \"extras\" or \"dcim/interfaces\" etc.
    kwargs (default {}) - any additional requests method's arguments

    Source code in norfab\\workers\\netbox_worker.py
    def rest(\n    self, instance: str = None, method: str = \"get\", api: str = \"\", **kwargs\n) -> dict:\n    \"\"\"\n    Method to query Netbox REST API.\n\n    :param instance: Netbox instance name\n    :param method: requests method name e.g. get, post, put etc.\n    :param api: api url to query e.g. \"extras\" or \"dcim/interfaces\" etc.\n    :param kwargs: any additional requests method's arguments\n    \"\"\"\n    params = self._get_instance_params(instance)\n\n    # send request to Netbox REST API\n    response = getattr(requests, method)(\n        url=f\"{params['url']}/api/{api}/\",\n        headers={\n            \"Content-Type\": \"application/json\",\n            \"Accept\": \"application/json\",\n            \"Authorization\": f\"Token {params['token']}\",\n        },\n        verify=params.get(\"ssl_verify\", True),\n        **kwargs,\n    )\n\n    response.raise_for_status()\n\n    return response.json()\n
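A usage sketch, assuming the same hypothetical worker object and "prod" instance; any extra keyword arguments are passed straight through to the underlying requests call:

    # GET dcim/interfaces, filtering by device name via requests' params kwarg
    interfaces = worker.rest(
        instance="prod",
        method="get",
        api="dcim/interfaces",
        params={"device": "R1"},  # forwarded to requests.get(..., params=...)
    )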
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.get_devices","title":"get_devices(filters=None, instance=None, dry_run=False, devices=None)","text":"

    Function to retrieve devices data from Netbox using GraphQL API.

Parameters:

    filters (list, default None) - list of filter dictionaries to filter devices
    instance (str, default None) - Netbox instance name
    dry_run (bool, default False) - only return query content, do not run it
    devices (list, default None) - list of device names to query data for

    Returns:

    Result - dictionary keyed by device name with device data

    Source code in norfab\\workers\\netbox_worker.py
    def get_devices(\n    self,\n    filters: list = None,\n    instance: str = None,\n    dry_run: bool = False,\n    devices: list = None,\n) -> Result:\n    \"\"\"\n    Function to retrieve devices data from Netbox using GraphQL API.\n\n    :param filters: list of filters dictionaries to filter devices\n    :param instance: Netbox instance name\n    :param dry_run: only return query content, do not run it\n    :param devices: list of device names to query data for\n    :return: dictionary keyed by device name with device data\n    \"\"\"\n    ret = Result(task=f\"{self.name}:get_devices\", result={})\n    instance = instance or self.default_instance\n    filters = filters or []\n\n    device_fields = [\n        \"name\",\n        \"last_updated\",\n        \"custom_field_data\",\n        \"tags {name}\",\n        \"device_type {model}\",\n        \"role {name}\",\n        \"config_context\",\n        \"tenant {name}\",\n        \"platform {name}\",\n        \"serial\",\n        \"asset_tag\",\n        \"site {name tags{name}}\",\n        \"location {name}\",\n        \"rack {name}\",\n        \"status\",\n        \"primary_ip4 {address}\",\n        \"primary_ip6 {address}\",\n        \"airflow\",\n        \"position\",\n    ]\n\n    # form queries dictionary out of filters\n    queries = {\n        f\"devices_by_filter_{index}\": {\n            \"obj\": \"device_list\",\n            \"filters\": filter_item,\n            \"fields\": device_fields,\n        }\n        for index, filter_item in enumerate(filters)\n    }\n\n    # add devices list query\n    if devices:\n        if self.nb_version[0] == 4:\n            dlist = '[\"{dl}\"]'.format(dl='\", \"'.join(devices))\n            filters_dict = {\"name\": f\"{{in_list: {dlist}}}\"}\n        elif self.nb_version[0] == 3:\n            filters_dict = {\"name\": devices}\n        queries[\"devices_by_devices_list\"] = {\n            \"obj\": \"device_list\",\n            \"filters\": filters_dict,\n            \"fields\": device_fields,\n        }\n\n    # send queries\n    query_result = self.graphql(queries=queries, instance=instance, dry_run=dry_run)\n    devices_data = query_result.result\n\n    # return dry run result\n    if dry_run:\n        return query_result\n\n    # check for errors\n    if query_result.errors:\n        msg = f\"{self.name} - get devices query failed with errors:\\n{query_result.errors}\"\n        raise Exception(msg)\n\n    # process devices\n    for devices_list in devices_data.values():\n        for device in devices_list:\n            if device[\"name\"] not in ret.result:\n                ret.result[device.pop(\"name\")] = device\n\n    return ret\n
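A short sketch of how this could be called, assuming a hypothetical worker object and "prod" instance; the filter key is an assumption:

    # Combine an explicit device list with a filter dictionary
    devices = worker.get_devices(
        devices=["R1", "R2"],
        filters=[{"role": "router"}],  # assumed filter key
        instance="prod",
    )
    for name, data in devices.result.items():
        print(name, data["platform"], data["primary_ip4"])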
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.get_interfaces","title":"get_interfaces(instance=None, devices=None, ip_addresses=False, inventory_items=False, dry_run=False)","text":"

    Function to retrieve device interfaces from Netbox using GraphQL API.

Parameters:

    instance (str, default None) - Netbox instance name
    devices (list, default None) - list of devices to retrieve interfaces for
    ip_addresses (bool, default False) - if True, retrieves interface IPs
    inventory_items (bool, default False) - if True, retrieves interface inventory items
    dry_run (bool, default False) - only return query content, do not run it

    Returns:

    Result - dictionary keyed by device name with interface details

    Source code in norfab\\workers\\netbox_worker.py
    def get_interfaces(\n    self,\n    instance: str = None,\n    devices: list = None,\n    ip_addresses: bool = False,\n    inventory_items: bool = False,\n    dry_run: bool = False,\n) -> Result:\n    \"\"\"\n    Function to retrieve device interfaces from Netbox using GraphQL API.\n\n    :param instance: Netbox instance name\n    :param devices: list of devices to retrieve interfaces for\n    :param ip_addresses: if True, retrieves interface IPs\n    :param inventory_items: if True, retrieves interface inventory items\n    :param dry_run: only return query content, do not run it\n    :return: dictionary keyed by device name with interface details\n    \"\"\"\n    # form final result object\n    ret = Result(\n        task=f\"{self.name}:get_interfaces\", result={d: {} for d in devices}\n    )\n    intf_fields = [\n        \"name\",\n        \"enabled\",\n        \"description\",\n        \"mtu\",\n        \"parent {name}\",\n        \"mac_address\",\n        \"mode\",\n        \"untagged_vlan {vid name}\",\n        \"vrf {name}\",\n        \"tagged_vlans {vid name}\",\n        \"tags {name}\",\n        \"custom_fields\",\n        \"last_updated\",\n        \"bridge {name}\",\n        \"child_interfaces {name}\",\n        \"bridge_interfaces {name}\",\n        \"member_interfaces {name}\",\n        \"wwn\",\n        \"duplex\",\n        \"speed\",\n        \"id\",\n        \"device {name}\",\n    ]\n\n    # add IP addresses to interfaces fields\n    if ip_addresses:\n        intf_fields.append(\n            \"ip_addresses {address status role dns_name description custom_fields last_updated tenant {name} tags {name}}\"\n        )\n\n    # form interfaces query dictionary\n    queries = {\n        \"interfaces\": {\n            \"obj\": \"interface_list\",\n            \"filters\": {\"device\": devices},\n            \"fields\": intf_fields,\n        }\n    }\n\n    # add query to retrieve inventory items\n    if inventory_items:\n        inv_filters = {\"device\": devices, \"component_type\": \"dcim.interface\"}\n        inv_fields = [\n            \"name\",\n            \"component {... 
on InterfaceType {id}}\",\n            \"role {name}\",\n            \"manufacturer {name}\",\n            \"custom_fields\",\n            \"label\",\n            \"description\",\n            \"tags {name}\",\n            \"asset_tag\",\n            \"serial\",\n            \"part_id\",\n        ]\n        queries[\"inventor_items\"] = {\n            \"obj\": \"inventory_item_list\",\n            \"filters\": inv_filters,\n            \"fields\": inv_fields,\n        }\n\n    query_result = self.graphql(instance=instance, queries=queries, dry_run=dry_run)\n\n    # return dry run result\n    if dry_run:\n        return query_result\n\n    interfaces_data = query_result.result\n\n    # exit if no Interfaces returned\n    if not interfaces_data.get(\"interfaces\"):\n        raise Exception(\n            f\"{self.name} - no interfaces data in '{interfaces_data}' returned by '{instance}' \"\n            f\"for devices {', '.join(devices)}\"\n        )\n\n    # process query results\n    interfaces = interfaces_data.pop(\"interfaces\")\n\n    # process inventory items\n    if inventory_items:\n        inventory_items_list = interfaces_data.pop(\"inventor_items\")\n        # transform inventory items list to a dictionary keyed by intf_id\n        inventory_items_dict = {}\n        while inventory_items_list:\n            inv_item = inventory_items_list.pop()\n            # skip inventory items that does not assigned to components\n            if inv_item.get(\"component\") is None:\n                continue\n            intf_id = str(inv_item.pop(\"component\").pop(\"id\"))\n            inventory_items_dict.setdefault(intf_id, [])\n            inventory_items_dict[intf_id].append(inv_item)\n        # iterate over interfaces and add inventory items\n        for intf in interfaces:\n            intf[\"inventory_items\"] = inventory_items_dict.pop(intf[\"id\"], [])\n\n    # transform interfaces list to dictionary keyed by device and interfaces names\n    while interfaces:\n        intf = interfaces.pop()\n        _ = intf.pop(\"id\")\n        device_name = intf.pop(\"device\").pop(\"name\")\n        intf_name = intf.pop(\"name\")\n        if device_name in ret.result:  # Netbox issue #16299\n            ret.result[device_name][intf_name] = intf\n\n    return ret\n
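A minimal usage sketch under the same assumptions (hypothetical worker object, "prod" instance, device names):

    # Pull interfaces together with their IP addresses for two devices
    intfs = worker.get_interfaces(
        devices=["R1", "R2"],
        instance="prod",
        ip_addresses=True,
    )
    print(intfs.result["R1"])  # dictionary keyed by interface name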
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.get_connections","title":"get_connections(devices, instance=None, dry_run=False, cables=False, circuits=False)","text":"

    Function to retrieve device connections data from Netbox using GraphQL API.

Parameters:

    instance (str, default None) - Netbox instance name
    devices (list, required) - list of devices to retrieve interfaces for
    dry_run (bool, default False) - only return query content, do not run it
    cables (bool, default False) - if True includes interfaces' directly attached cables details
    circuits (bool, default False) - if True includes interfaces' circuits termination details

    Returns:

    Result - dictionary keyed by device name with connections data

    Source code in norfab\\workers\\netbox_worker.py
    def get_connections(\n    self,\n    devices: list,\n    instance: str = None,\n    dry_run: bool = False,\n    cables: bool = False,\n    circuits: bool = False,\n) -> Result:\n    \"\"\"\n    Function to retrieve device connections data from Netbox using GraphQL API.\n\n    :param instance: Netbox instance name\n    :param devices: list of devices to retrieve interface for\n    :param dry_run: only return query content, do not run it\n    :param cables: if True includes interfaces' directly attached cables details\n    :param circuits: if True includes interfaces' circuits termination details\n    :return: dictionary keyed by device name with connections data\n    \"\"\"\n    # form final result dictionary\n    ret = Result(\n        task=f\"{self.name}:get_connections\", result={d: {} for d in devices}\n    )\n\n    # form lists of fields to request from netbox\n    cable_fields = \"\"\"\n        cable {\n            type\n            status\n            tenant {name}\n            label\n            tags {name}\n            length\n            length_unit\n            custom_fields\n        }\n    \"\"\"\n    if self.nb_version[0] == 4:\n        interfaces_fields = [\n            \"name\",\n            \"device {name}\",\n            \"\"\"connected_endpoints {\n            __typename \n            ... on InterfaceType {name device {name}}\n            ... on ProviderNetworkType {name}\n            }\"\"\",\n        ]\n    elif self.nb_version[0] == 3:\n        interfaces_fields = [\n            \"name\",\n            \"device {name}\",\n            \"\"\"connected_endpoints {\n            __typename \n            ... on InterfaceType {name device {name}}\n            }\"\"\",\n        ]\n    console_ports_fields = [\n        \"name\",\n        \"device {name}\",\n        \"\"\"connected_endpoints {\n          __typename \n          ... on ConsoleServerPortType {name device {name}}\n        }\"\"\",\n        \"\"\"link_peers {\n          __typename\n          ... on ConsoleServerPortType {name device {name}}\n          ... on FrontPortType {name device {name}}\n          ... on RearPortType {name device {name}}\n        }\"\"\",\n    ]\n    console_server_ports_fields = [\n        \"name\",\n        \"device {name}\",\n        \"\"\"connected_endpoints {\n          __typename \n          ... on ConsolePortType {name device {name}}\n        }\"\"\",\n        \"\"\"link_peers {\n          __typename\n          ... on ConsolePortType {name device {name}}\n          ... on FrontPortType {name device {name}}\n          ... on RearPortType {name device {name}}\n        }\"\"\",\n    ]\n\n    # add circuits info\n    if circuits is True:\n        interfaces_fields.append(\n            \"\"\"\n            link_peers {\n                __typename\n                ... on InterfaceType {name device {name}}\n                ... on FrontPortType {name device {name}}\n                ... on RearPortType {name device {name}}\n                ... on CircuitTerminationType {\n                    circuit{\n                        cid \n                        description \n                        tags{name} \n                        provider{name} \n                        status\n                        custom_fields\n                        commit_rate\n                    }\n                }\n            }\n        \"\"\"\n        )\n    else:\n        interfaces_fields.append(\n            \"\"\"\n            link_peers {\n                __typename\n                ... 
on InterfaceType {name device {name}}\n                ... on FrontPortType {name device {name}}\n                ... on RearPortType {name device {name}}\n            }\n        \"\"\"\n        )\n\n    # check if need to include cables info\n    if cables is True:\n        interfaces_fields.append(cable_fields)\n        console_ports_fields.append(cable_fields)\n        console_server_ports_fields.append(cable_fields)\n\n    # form query dictionary with aliases to get data from Netbox\n    queries = {\n        \"interface\": {\n            \"obj\": \"interface_list\",\n            \"filters\": {\"device\": devices},\n            \"fields\": interfaces_fields,\n        },\n        \"consoleport\": {\n            \"obj\": \"console_port_list\",\n            \"filters\": {\"device\": devices},\n            \"fields\": console_ports_fields,\n        },\n        \"consoleserverport\": {\n            \"obj\": \"console_server_port_list\",\n            \"filters\": {\"device\": devices},\n            \"fields\": console_server_ports_fields,\n        },\n    }\n\n    # retrieve full list of devices interface with all cables\n    query_result = self.graphql(queries=queries, instance=instance, dry_run=dry_run)\n\n    # return dry run result\n    if dry_run:\n        return query_result\n\n    all_ports = query_result.result\n\n    # extract interfaces\n    for port_type, ports in all_ports.items():\n        for port in ports:\n            endpoints = port[\"connected_endpoints\"]\n            # skip ports that have no remote device connected\n            if not endpoints or not all(i for i in endpoints):\n                continue\n\n            # extract required parameters\n            cable = port.get(\"cable\", {})\n            device_name = port[\"device\"][\"name\"]\n            port_name = port[\"name\"]\n            link_peers = port[\"link_peers\"]\n            remote_termination_type = endpoints[0][\"__typename\"].lower()\n            remote_termination_type = remote_termination_type.replace(\"type\", \"\")\n\n            # form initial connection dictionary\n            connection = {\n                \"breakout\": len(endpoints) > 1,\n                \"remote_termination_type\": remote_termination_type,\n                \"termination_type\": port_type,\n            }\n\n            # add remote connection details\n            if remote_termination_type == \"providernetwork\":\n                connection[\"remote_device\"] = None\n                connection[\"remote_interface\"] = None\n                connection[\"provider\"] = endpoints[0][\"name\"]\n            else:\n                remote_interface = endpoints[0][\"name\"]\n                if len(endpoints) > 1:\n                    remote_interface = [i[\"name\"] for i in endpoints]\n                connection[\"remote_interface\"] = remote_interface\n                connection[\"remote_device\"] = endpoints[0][\"device\"][\"name\"]\n\n            # handle circuits\n            if (\n                circuits and \"circuit\" in link_peers[0]\n            ):  # add circuit connection details\n                connection[\"circuit\"] = link_peers[0][\"circuit\"]\n\n            # add cable and its peer details\n            if cables:\n                peer_termination_type = link_peers[0][\"__typename\"].lower()\n                peer_termination_type = peer_termination_type.replace(\"type\", \"\")\n                cable[\"peer_termination_type\"] = peer_termination_type\n                cable[\"peer_device\"] = 
link_peers[0].get(\"device\", {}).get(\"name\")\n                cable[\"peer_interface\"] = link_peers[0].get(\"name\")\n                if len(link_peers) > 1:  # handle breakout cable\n                    cable[\"peer_interface\"] = [i[\"name\"] for i in link_peers]\n                connection[\"cable\"] = cable\n\n            ret.result[device_name][port_name] = connection\n\n    return ret\n
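A usage sketch, assuming a hypothetical worker object, "prod" instance and device name:

    # Retrieve connections including directly attached cable details
    conns = worker.get_connections(
        devices=["R1"],
        instance="prod",
        cables=True,
    )
    for port, connection in conns.result["R1"].items():
        print(port, connection["remote_device"], connection["remote_interface"])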
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.get_circuits","title":"get_circuits(devices, instance=None, dry_run=False)","text":"

    Function to retrieve device circuits data from Netbox using GraphQL API.

Parameters:

    devices (list, required) - list of devices to retrieve circuits for
    instance (str, default None) - Netbox instance name
    dry_run (bool, default False) - only return query content, do not run it

    Returns:

    dictionary keyed by device name with circuits data

    Source code in norfab\\workers\\netbox_worker.py
    def get_circuits(\n    self,\n    devices: list,\n    instance: str = None,\n    dry_run: bool = False,\n):\n    \"\"\"\n    Function to retrieve device circuits data from Netbox using GraphQL API.\n\n    :param devices: list of devices to retrieve interface for\n    :param instance: Netbox instance name\n    :param dry_run: only return query content, do not run it\n    :return: dictionary keyed by device name with circuits data\n    \"\"\"\n    # form final result object\n    ret = Result(task=f\"{self.name}:get_circuits\", result={d: {} for d in devices})\n\n    device_sites_fields = [\"site {slug}\"]\n    circuit_fields = [\n        \"cid\",\n        \"tags {name}\",\n        \"provider {name}\",\n        \"commit_rate\",\n        \"description\",\n        \"status\",\n        \"type {name}\",\n        \"provider_account {name}\",\n        \"tenant {name}\",\n        \"termination_a {id}\",\n        \"termination_z {id}\",\n        \"custom_fields\",\n        \"comments\",\n    ]\n\n    # retrieve list of hosts' sites\n    if self.nb_version[0] == 4:\n        dlist = '[\"{dl}\"]'.format(dl='\", \"'.join(devices))\n        device_filters_dict = {\"name\": f\"{{in_list: {dlist}}}\"}\n    elif self.nb_version[0] == 3:\n        device_filters_dict = {\"name\": devices}\n    device_sites = self.graphql(\n        obj=\"device_list\",\n        filters=device_filters_dict,\n        fields=device_sites_fields,\n        instance=instance,\n    )\n    sites = list(set([i[\"site\"][\"slug\"] for i in device_sites.result]))\n\n    # retrieve all circuits for devices' sites\n    if self.nb_version[0] == 4:\n        circuits_filters_dict = {\"site\": sites}\n    elif self.nb_version[0] == 3:\n        circuits_filters_dict = {\"site\": sites}\n\n    query_result = self.graphql(\n        obj=\"circuit_list\",\n        filters=circuits_filters_dict,\n        fields=circuit_fields,\n        dry_run=dry_run,\n        instance=instance,\n    )\n\n    # return dry run result\n    if dry_run is True:\n        return query_result\n\n    all_circuits = query_result.result\n\n    # iterate over circuits and map them to devices\n    for circuit in all_circuits:\n        cid = circuit.pop(\"cid\")\n        circuit[\"tags\"] = [i[\"name\"] for i in circuit[\"tags\"]]\n        circuit[\"type\"] = circuit[\"type\"][\"name\"]\n        circuit[\"provider\"] = circuit[\"provider\"][\"name\"]\n        circuit[\"tenant\"] = circuit[\"tenant\"][\"name\"] if circuit[\"tenant\"] else None\n        circuit[\"provider_account\"] = (\n            circuit[\"provider_account\"][\"name\"]\n            if circuit[\"provider_account\"]\n            else None\n        )\n        termination_a = circuit.pop(\"termination_a\")\n        termination_z = circuit.pop(\"termination_z\")\n        termination_a = termination_a[\"id\"] if termination_a else None\n        termination_z = termination_z[\"id\"] if termination_z else None\n\n        # retrieve A or Z termination path using Netbox REST API\n        if termination_a is not None:\n            circuit_path = self.rest(\n                instance=instance,\n                method=\"get\",\n                api=f\"/circuits/circuit-terminations/{termination_a}/paths/\",\n            )\n        elif termination_z is not None:\n            circuit_path = self.rest(\n                instance=instance,\n                method=\"get\",\n                api=f\"/circuits/circuit-terminations/{termination_z}/paths/\",\n            )\n        else:\n            continue\n\n        # check if 
circuit ends connect to device or provider network\n        if (\n            not circuit_path\n            or \"name\" not in circuit_path[0][\"path\"][0][0]\n            or \"name\" not in circuit_path[0][\"path\"][-1][-1]\n        ):\n            continue\n\n        # form A and Z connection endpoints\n        end_a = {\n            \"device\": circuit_path[0][\"path\"][0][0]\n            .get(\"device\", {})\n            .get(\"name\", False),\n            \"provider_network\": \"provider-network\"\n            in circuit_path[0][\"path\"][0][0][\"url\"],\n            \"name\": circuit_path[0][\"path\"][0][0][\"name\"],\n        }\n        end_z = {\n            \"device\": circuit_path[0][\"path\"][-1][-1]\n            .get(\"device\", {})\n            .get(\"name\", False),\n            \"provider_network\": \"provider-network\"\n            in circuit_path[0][\"path\"][-1][-1][\"url\"],\n            \"name\": circuit_path[0][\"path\"][-1][-1][\"name\"],\n        }\n        circuit[\"is_active\"] = circuit_path[0][\"is_active\"]\n\n        # map path ends to devices\n        if end_a[\"device\"] and end_a[\"device\"] in devices:\n            ret.result[end_a[\"device\"]][cid] = copy.deepcopy(circuit)\n            ret.result[end_a[\"device\"]][cid][\"interface\"] = end_a[\"name\"]\n            if end_z[\"device\"]:\n                ret.result[end_a[\"device\"]][cid][\"remote_device\"] = end_z[\"device\"]\n                ret.result[end_a[\"device\"]][cid][\"remote_interface\"] = end_z[\"name\"]\n            elif end_z[\"provider_network\"]:\n                ret.result[end_a[\"device\"]][cid][\"provider_network\"] = end_z[\"name\"]\n        if end_z[\"device\"] and end_z[\"device\"] in devices:\n            ret.result[end_z[\"device\"]][cid] = copy.deepcopy(circuit)\n            ret.result[end_z[\"device\"]][cid][\"interface\"] = end_z[\"name\"]\n            if end_a[\"device\"]:\n                ret.result[end_z[\"device\"]][cid][\"remote_device\"] = end_a[\"device\"]\n                ret.result[end_z[\"device\"]][cid][\"remote_interface\"] = end_a[\"name\"]\n            elif end_a[\"provider_network\"]:\n                ret.result[end_z[\"device\"]][cid][\"provider_network\"] = end_a[\"name\"]\n\n    return ret\n
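A sketch of how the returned structure could be consumed, assuming a hypothetical worker object, "prod" instance and device names:

    # Result is keyed by device name, then by circuit CID
    circuits = worker.get_circuits(devices=["R1", "R2"], instance="prod")
    for device, device_circuits in circuits.result.items():
        for cid, circuit in device_circuits.items():
            print(device, cid, circuit["provider"], circuit.get("remote_device"))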
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.get_nornir_inventory","title":"get_nornir_inventory(filters=None, devices=None, instance=None, interfaces=False, connections=False, circuits=False, nbdata=False, primary_ip='ip4')","text":"

    Method to query Netbox and return devices data in Nornir inventory format.

    Source code in norfab\\workers\\netbox_worker.py
    def get_nornir_inventory(\n    self,\n    filters: list = None,\n    devices: list = None,\n    instance: str = None,\n    interfaces: Union[dict, bool] = False,\n    connections: Union[dict, bool] = False,\n    circuits: Union[dict, bool] = False,\n    nbdata: bool = False,\n    primary_ip: str = \"ip4\",\n) -> Result:\n    \"\"\"\n    Method to query Netbox and return devices data in Nornir inventory format.\n    \"\"\"\n    hosts = {}\n    inventory = {\"hosts\": hosts}\n    ret = Result(task=f\"{self.name}:get_nornir_inventory\", result=inventory)\n\n    # check Netbox status\n    netbox_status = self.get_netbox_status(instance=instance)\n    if netbox_status.result[instance or self.default_instance][\"status\"] is False:\n        return ret\n\n    # retrieve devices data\n    nb_devices = self.get_devices(\n        filters=filters, devices=devices, instance=instance\n    )\n\n    # form Nornir hosts inventory\n    for device_name, device in nb_devices.result.items():\n        host = device[\"config_context\"].pop(\"nornir\", {})\n        host.setdefault(\"data\", {})\n        name = host.pop(\"name\", device_name)\n        hosts[name] = host\n        # add platform if not provided in device config context\n        if not host.get(\"platform\"):\n            if device[\"platform\"]:\n                host[\"platform\"] = device[\"platform\"][\"name\"]\n            else:\n                log.warning(f\"{self.name} - no platform found for '{name}' device\")\n        # add hostname if not provided in config context\n        if not host.get(\"hostname\"):\n            if device[\"primary_ip4\"] and primary_ip in [\"ip4\", \"ipv4\"]:\n                host[\"hostname\"] = device[\"primary_ip4\"][\"address\"].split(\"/\")[0]\n            elif device[\"primary_ip6\"] and primary_ip in [\"ip6\", \"ipv6\"]:\n                host[\"hostname\"] = device[\"primary_ip6\"][\"address\"].split(\"/\")[0]\n            else:\n                host[\"hostname\"] = name\n        # add netbox data to host's data\n        if nbdata is True:\n            host[\"data\"].update(device)\n\n    # add interfaces data\n    if interfaces:\n        # decide on get_interfaces arguments\n        kwargs = interfaces if isinstance(interfaces, dict) else {}\n        # add 'interfaces' key to all hosts' data\n        for host in hosts.values():\n            host[\"data\"].setdefault(\"interfaces\", {})\n        # query interfaces data from netbox\n        nb_interfaces = self.get_interfaces(\n            devices=list(hosts), instance=instance, **kwargs\n        )\n        # save interfaces data to hosts' inventory\n        while nb_interfaces.result:\n            device, device_interfaces = nb_interfaces.result.popitem()\n            hosts[device][\"data\"][\"interfaces\"] = device_interfaces\n\n    # add connections data\n    if connections:\n        # decide on get_interfaces arguments\n        kwargs = connections if isinstance(connections, dict) else {}\n        # add 'connections' key to all hosts' data\n        for host in hosts.values():\n            host[\"data\"].setdefault(\"connections\", {})\n        # query connections data from netbox\n        nb_connections = self.get_connections(\n            devices=list(hosts), instance=instance, **kwargs\n        )\n        # save connections data to hosts' inventory\n        while nb_connections.result:\n            device, device_connections = nb_connections.result.popitem()\n            hosts[device][\"data\"][\"connections\"] = device_connections\n\n    # add 
circuits data\n    if circuits:\n        # decide on get_interfaces arguments\n        kwargs = circuits if isinstance(circuits, dict) else {}\n        # add 'circuits' key to all hosts' data\n        for host in hosts.values():\n            host[\"data\"].setdefault(\"circuits\", {})\n        # query circuits data from netbox\n        nb_circuits = self.get_circuits(\n            devices=list(hosts), instance=instance, **kwargs\n        )\n        # save circuits data to hosts' inventory\n        while nb_circuits.result:\n            device, device_circuits = nb_circuits.result.popitem()\n            hosts[device][\"data\"][\"circuits\"] = device_circuits\n\n    return ret\n
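A usage sketch under the same assumptions (hypothetical worker object, "prod" instance, device names); passing a dictionary for interfaces forwards it as keyword arguments to get_interfaces:

    # Build a Nornir-style inventory enriched with interfaces and connections
    inventory = worker.get_nornir_inventory(
        devices=["R1", "R2"],
        instance="prod",
        interfaces={"ip_addresses": True},
        connections=True,
        nbdata=True,
    )
    print(inventory.result["hosts"]["R1"]["data"]["interfaces"])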
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.update_device_facts","title":"update_device_facts(instance=None, dry_run=False, via='nornir', timeout=60, **kwargs)","text":"

    Function to update device facts in Netbox using information provided by NAPALM get_facts getter:

• serial number
• software version

Parameters:

    instance (str, default None) - Netbox instance name
    dry_run (bool, default False) - return information that would be pushed to Netbox but do not push it
    via (str, default 'nornir') - service name to use to retrieve devices' data, default is nornir parse task
    timeout (int, default 60) - seconds to wait before timing out the data retrieval job
    kwargs (default {}) - any additional arguments to send to service for device data retrieval

    Source code in norfab\\workers\\netbox_worker.py
    def update_device_facts(\n    self,\n    instance: str = None,\n    dry_run: bool = False,\n    via: str = \"nornir\",\n    timeout: int = 60,\n    **kwargs,\n):\n    \"\"\"\n    Function to update device facts in Netbox using information\n    provided by NAPALM get_facts getter:\n\n    - serial number\n    - software version\n    -\n\n    :param instance: Netbox instance name\n    :param dry_run: return information that would be pushed to Netbox but do not push it\n    :param via: service name to use to retrieve devices' data, default is nornir parse task\n    :param timeout: seconds to wait before timeout data retrieval job\n    :param kwargs: any additional arguments to send to service for device data retrieval\n    \"\"\"\n    result = {}\n    ret = Result(task=f\"{self.name}:push_device_facts\", result=result)\n    nb = self._get_pynetbox(instance)\n\n    if via == \"nornir\":\n        data = self.client.run_job(\n            \"nornir\",\n            \"parse\",\n            kwargs=kwargs,\n            workers=\"all\",\n            timeout=timeout,\n        )\n        for worker, results in data.items():\n            for host, host_data in results[\"result\"].items():\n                facts = host_data[\"napalm_get\"][\"get_facts\"]\n                nb_device = nb.dcim.devices.get(name=host)\n                if not nb_device:\n                    raise Exception(f\"'{host}' does not exist in Netbox\")\n                nb_device.serial = facts[\"serial_number\"]\n                if \"OS Version\" not in nb_device.comments:\n                    nb_device.comments += f\"\\nOS Version: {facts['os_version']}\"\n                nb_device.save()\n                result[host] = {\n                    \"update_device_facts\": {\n                        \"serial\": facts[\"serial_number\"],\n                        \"os_version\": facts[\"os_version\"],\n                    }\n                }\n    else:\n        raise UnsupportedServiceError(f\"'{via}' service not supported\")\n\n    return ret\n
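A minimal invocation sketch, assuming a hypothetical worker object, a "prod" instance and a running Nornir service with NAPALM-capable hosts:

    # Push serial numbers and note OS versions for all reachable devices
    result = worker.update_device_facts(
        instance="prod",
        via="nornir",
        timeout=120,
    )
    print(result.result)  # per-device serial and os_version that were pushed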
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.update_device_facts--software-version","title":"software version","text":""},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.get_next_ip","title":"get_next_ip(prefix, description=None, device=None, interface=None, vrf=None, interface_create=True, secondary=False, tags=None, dns_name=None, tenant=None, comments=None, instance=None, dry_run=False)","text":"

Method to retrieve an existing or allocate a new IP address in Netbox.

Parameters:

    prefix (str, required) - IPv4 or IPv6 prefix e.g. 10.0.0.0/24 or prefix description to allocate next available IP address from
    description (str, default None) - IP address description to record in Netbox database
    device (str, default None) - device name to find interface for and link IP address with
    interface (str, default None) - interface name to link IP address with, the device attribute must also be provided

    Source code in norfab\\workers\\netbox_worker.py
    def get_next_ip(\n    self,\n    prefix: str,\n    description: str = None,\n    device: str = None,\n    interface: str = None,\n    vrf: str = None,\n    interface_create: bool = True,\n    secondary: bool = False,\n    tags: list = None,\n    dns_name: str = None,\n    tenant: str = None,\n    comments: str = None,\n    instance: str = None,\n    dry_run: bool = False,\n):\n    \"\"\"\n    Method to retrieve existing or allocate new IP address in Netbox.\n\n    :param prefix: IPv4 or IPv6 prefix e.g. ``10.0.0.0/24`` or prefix description\n        to allocate next available IP Address from\n    :param description: IP address description to record in Netbox database\n    :param device: device name to find interface for and link IP address with\n    :param interface: interface name to link IP address with, ``device`` attribute\n        also must be provided\n\n    \"\"\"\n    print(f\"!!!!!!!!!!!! prefix {prefix}, description {description}\")\n    nb = self._get_pynetbox(instance)\n    nb_prefix = nb.ipam.prefixes.get(prefix=prefix, vrf=vrf)\n    nb_ip = nb_prefix.available_ips.create()\n    if description is not None:\n        nb_ip.description = description\n    nb_ip.save()\n\n    return Result(result=str(nb_ip))\n
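A usage sketch, assuming a hypothetical worker object and "prod" instance; the prefix and description values are illustrative:

    # Allocate the next free IP from a prefix and record a description
    next_ip = worker.get_next_ip(
        prefix="10.0.0.0/24",
        description="R1 Loopback0",
        instance="prod",
    )
    print(next_ip.result)  # e.g. "10.0.0.5/24"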
    "},{"location":"nfcli_client_api_reference/","title":"NFCLI (PICLE Shell) Client","text":""},{"location":"nfcli_client_api_reference/#norfab.clients.picle_shell_client--picle-shell-client","title":"PICLE Shell CLient","text":"

    Client that implements interactive shell to work with NorFab.

    "},{"location":"nfcli_client_api_reference/#norfab.clients.picle_shell_client.FileServiceCommands","title":"FileServiceCommands","text":"

    Bases: BaseModel

    "},{"location":"nfcli_client_api_reference/#norfab.clients.picle_shell_client.FileServiceCommands--sample-usage","title":"Sample Usage","text":""},{"location":"nfcli_client_api_reference/#norfab.clients.picle_shell_client.FileServiceCommands--copy","title":"copy","text":"

Copy a file to the client's fetched files directory:

    file copy_ url nf://cli/commands.txt

Copy a file to a destination relative to the current directory:

    file copy_ url nf://cli/commands.txt destination commands.txt

    "},{"location":"nfcli_client_api_reference/#norfab.clients.picle_shell_client.FileServiceCommands--list","title":"list","text":"

    List files at broker root directory:

file list\nfile list url nf://\n

List file details:

    file details\nfile details url nf://\n
    "},{"location":"nfcli_client_api_reference/#norfab.clients.picle_shell_client.NorFabShell","title":"NorFabShell","text":"

    Bases: BaseModel

    "},{"location":"nfcli_client_api_reference/#norfab.clients.picle_shell_client.NorFabShell.cmd_preloop_override","title":"cmd_preloop_override() classmethod","text":"

This method is called before the CMD loop starts

    Source code in norfab\\clients\\picle_shell_client.py
    @classmethod\ndef cmd_preloop_override(self):\n    \"\"\"This method called before CMD loop starts\"\"\"\n    pass\n
    "},{"location":"norfab_architecture/","title":"NORFAB Architecture","text":""},{"location":"norfab_architecture/#high-level-design","title":"High Level Design","text":""},{"location":"norfab_architecture/#low-level-design","title":"Low Level Design","text":"

Low level design revolves around resource-oriented services - services that manage resources, where resources could be databases, network devices, file systems, etc.

    "},{"location":"norfab_architecture/#jobs-execution-flow","title":"Jobs Execution Flow","text":"

    There are multiple job flows implemented:

    • JOB POST FLOW - for clients to publish jobs to workers
    • JOB LOOP - job execution performed by workers
    • JOB GET FLOW - for clients to retrieve job execution results

The flows above are depicted on the diagram. A client-side sketch of the POST flow is shown below.
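This sketch is modeled on the run_job call used by the Netbox worker elsewhere in this document; the client object, service name, task name and task arguments here are assumptions:

    # Post a job to all workers of the "nornir" service and collect the results
    reply = client.run_job(
        "nornir",                             # service name
        "cli",                                # assumed task name
        kwargs={"commands": ["show clock"]},  # assumed task arguments
        workers="all",                        # dispatch target
        timeout=60,
    )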

    "},{"location":"norfab_broker_reference/","title":"Broker","text":"

Majordomo Protocol broker. A minimal implementation of http://rfc.zeromq.org/spec:7 and spec:8.

Author: Min RK (benjaminrk@gmail.com). Based on the Java example by Arkadiusz Orzechowski.

    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPService","title":"NFPService(name)","text":"

    Bases: object

    A single NFP Service

    Source code in norfab\\core\\broker.py
    def __init__(self, name: str):\n    self.name = name  # Service name\n    self.workers = []  # list of known workers\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPWorker","title":"NFPWorker(address, socket, socket_lock, multiplier, keepalive, service=None, log_level='WARNING')","text":"

    Bases: object

    An NFP Worker convenience class

    Source code in norfab\\core\\broker.py
    def __init__(\n    self,\n    address: str,\n    socket,\n    socket_lock,\n    multiplier: int,  # e.g. 6 times\n    keepalive: int,  # e.g. 5000 ms\n    service: NFPService = None,\n    log_level: str = \"WARNING\",\n):\n    self.address = address  # Address to route to\n    self.service = service\n    self.ready = False\n    self.socket = socket\n    self.exit_event = threading.Event()\n    self.keepalive = keepalive\n    self.multiplier = multiplier\n    self.socket_lock = socket_lock\n    self.log_level = log_level\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPWorker.is_ready","title":"is_ready()","text":"

    True if worker signaled W.READY

    Source code in norfab\\core\\broker.py
    def is_ready(self):\n    \"\"\"True if worker signaled W.READY\"\"\"\n    return self.service is not None and self.ready is True\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPWorker.destroy","title":"destroy(disconnect=False)","text":"

    Clean up routine

    Source code in norfab\\core\\broker.py
    def destroy(self, disconnect=False):\n    \"\"\"Clean up routine\"\"\"\n    self.exit_event.set()\n    self.keepaliver.stop()\n    self.service.workers.remove(self)\n\n    if disconnect is True:\n        msg = [self.address, b\"\", NFP.WORKER, self.service.name, NFP.DISCONNECT]\n        with self.socket_lock:\n            self.socket.send_multipart(msg)\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker","title":"NFPBroker(endpoint, exit_event, inventory, log_level='WARNING', multiplier=6, keepalive=2500, base_dir='')","text":"

    NORFAB Protocol broker

    Initialize broker state.

    Source code in norfab\\core\\broker.py
    def __init__(\n    self,\n    endpoint: str,\n    exit_event: Event,\n    inventory: NorFabInventory,\n    log_level: str = \"WARNING\",\n    multiplier: int = 6,\n    keepalive: int = 2500,\n    base_dir: str = \"\",\n):\n    \"\"\"Initialize broker state.\"\"\"\n    log.setLevel(log_level.upper())\n    self.log_level = log_level\n    self.keepalive = keepalive\n    self.multiplier = multiplier\n\n    self.services = {}\n    self.workers = {}\n    self.exit_event = exit_event\n    self.inventory = inventory\n\n    self.ctx = zmq.Context()\n    self.socket = self.ctx.socket(zmq.ROUTER)\n    self.socket.linger = 0\n    self.poller = zmq.Poller()\n    self.poller.register(self.socket, zmq.POLLIN)\n    self.socket.bind(endpoint)\n    self.socket_lock = (\n        threading.Lock()\n    )  # used for keepalives to protect socket object\n\n    self.base_dir = base_dir or os.getcwd()\n    os.makedirs(self.base_dir, exist_ok=True)\n\n    log.debug(f\"NFPBroker - is read and listening on {endpoint}\")\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.mediate","title":"mediate()","text":"

    Main broker work happens here

Clients send messages of this frame format:

    Source code in norfab\\core\\broker.py
    def mediate(self):\n    \"\"\"\n    Main broker work happens here\n\n    Client send messages of this frame format:\n\n\n    \"\"\"\n    while True:\n        try:\n            items = self.poller.poll(self.keepalive)\n        except KeyboardInterrupt:\n            break  # Interrupted\n\n        if items:\n            msg = self.socket.recv_multipart()\n            log.debug(f\"NFPBroker - received '{msg}'\")\n\n            sender = msg.pop(0)\n            empty = msg.pop(0)\n            header = msg.pop(0)\n\n            if header == NFP.CLIENT:\n                self.process_client(sender, msg)\n            elif header == NFP.WORKER:\n                self.process_worker(sender, msg)\n\n        self.purge_workers()\n\n        # check if need to stop\n        if self.exit_event.is_set():\n            self.destroy()\n            break\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.destroy","title":"destroy()","text":"

    Disconnect all workers, destroy context.

    Source code in norfab\\core\\broker.py
    def destroy(self):\n    \"\"\"Disconnect all workers, destroy context.\"\"\"\n    log.info(f\"NFPBroker - interrupt received, killing broker\")\n    for name in list(self.workers.keys()):\n        # in case worker self destroyed while we iterating\n        if self.workers.get(name):\n            self.delete_worker(self.workers[name], True)\n    self.ctx.destroy(0)\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.delete_worker","title":"delete_worker(worker, disconnect)","text":"

Deletes the worker from all data structures and destroys it.

    Source code in norfab\\core\\broker.py
    def delete_worker(self, worker, disconnect):\n    \"\"\"Deletes worker from all data structures, and deletes worker.\"\"\"\n    worker.destroy(disconnect)\n    self.workers.pop(worker.address, None)\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.purge_workers","title":"purge_workers()","text":"

    Look for & delete expired workers.

    Source code in norfab\\core\\broker.py
    def purge_workers(self):\n    \"\"\"Look for & delete expired workers.\"\"\"\n    for name in list(self.workers.keys()):\n        # in case worker self destroyed while we iterating\n        if self.workers.get(name):\n            w = self.workers[name]\n        if not w.keepaliver.is_alive():\n            self.delete_worker(w, False)\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.send_to_worker","title":"send_to_worker(worker, command, sender, uuid, data)","text":"

    Send message to worker. If message is provided, sends that message.

    Source code in norfab\\core\\broker.py
    def send_to_worker(\n    self, worker: NFPWorker, command: bytes, sender: bytes, uuid: bytes, data: bytes\n):\n    \"\"\"Send message to worker. If message is provided, sends that message.\"\"\"\n    # Stack routing and protocol envelopes to start of message\n    if command == NFP.POST:\n        msg = [worker.address, b\"\", NFP.WORKER, NFP.POST, sender, b\"\", uuid, data]\n    elif command == NFP.GET:\n        msg = [worker.address, b\"\", NFP.WORKER, NFP.GET, sender, b\"\", uuid, data]\n    else:\n        log.error(f\"NFPBroker - invalid worker command: {command}\")\n        return\n    with self.socket_lock:\n        log.debug(f\"NFPBroker - sending to worker '{msg}'\")\n        self.socket.send_multipart(msg)\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.send_to_client","title":"send_to_client(client, command, service, message)","text":"

    Send message to client.

    Source code in norfab\\core\\broker.py
    def send_to_client(self, client: str, command: str, service: str, message: list):\n    \"\"\"Send message to client.\"\"\"\n    # Stack routing and protocol envelopes to start of message\n    if command == NFP.RESPONSE:\n        msg = [client, b\"\", NFP.CLIENT, NFP.RESPONSE, service] + message\n    elif command == NFP.EVENT:\n        msg = [client, b\"\", NFP.CLIENT, NFP.EVENT, service] + message\n    else:\n        log.error(f\"NFPBroker - invalid client command: {command}\")\n        return\n    with self.socket_lock:\n        log.debug(f\"NFPBroker - sending to client '{msg}'\")\n        self.socket.send_multipart(msg)\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.process_worker","title":"process_worker(sender, msg)","text":"

    Process message received from worker.

    Source code in norfab\\core\\broker.py
    def process_worker(self, sender, msg):\n    \"\"\"Process message received from worker.\"\"\"\n    command = msg.pop(0)\n    worker = self.require_worker(sender)\n\n    if NFP.READY == command and not worker.is_ready():\n        service = msg.pop(0)\n        worker.service = self.require_service(service)\n        worker.ready = True\n        worker.start_keepalives()\n        worker.service.workers.append(worker)\n    elif NFP.RESPONSE == command and worker.is_ready():\n        client = msg.pop(0)\n        empty = msg.pop(0)\n        self.send_to_client(client, NFP.RESPONSE, worker.service.name, msg)\n    elif NFP.KEEPALIVE == command:\n        worker.keepaliver.received_heartbeat([worker.address] + msg)\n    elif NFP.DISCONNECT == command and worker.is_ready():\n        self.delete_worker(worker, False)\n    elif NFP.EVENT == command and worker.is_ready():\n        client = msg.pop(0)\n        empty = msg.pop(0)\n        self.send_to_client(client, NFP.EVENT, worker.service.name, msg)\n    elif not worker.is_ready():\n        self.delete_worker(worker, disconnect=True)\n    else:\n        log.error(f\"NFPBroker - invalid message: {msg}\")\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.require_worker","title":"require_worker(address)","text":"

    Finds the worker, creates if necessary.

    Source code in norfab\\core\\broker.py
    def require_worker(self, address):\n    \"\"\"Finds the worker, creates if necessary.\"\"\"\n    if not self.workers.get(address):\n        self.workers[address] = NFPWorker(\n            address=address,\n            socket=self.socket,\n            multiplier=self.multiplier,\n            keepalive=self.keepalive,\n            socket_lock=self.socket_lock,\n            log_level=self.log_level,\n        )\n        log.info(f\"NFPBroker - registered new worker {address}\")\n\n    return self.workers[address]\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.require_service","title":"require_service(name)","text":"

    Locates the service (creates if necessary).

    Source code in norfab\\core\\broker.py
    def require_service(self, name):\n    \"\"\"Locates the service (creates if necessary).\"\"\"\n    if not self.services.get(name):\n        service = NFPService(name)\n        self.services[name] = service\n        log.debug(f\"NFPBroker - registered new service {name}\")\n\n    return self.services[name]\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.process_client","title":"process_client(sender, msg)","text":"

    Process a request coming from a client.

    Source code in norfab\\core\\broker.py
    def process_client(self, sender, msg):\n    \"\"\"Process a request coming from a client.\"\"\"\n    command = msg.pop(0)\n    service = msg.pop(0)\n    target = msg.pop(0)\n    uuid = msg.pop(0)\n    data = msg.pop(0)\n\n    # check if valid command from client\n    if command not in NFP.client_commands:\n        message = f\"NFPBroker - Unsupported client command '{command}'\"\n        log.error(message)\n        self.send_to_client(\n            sender, NFP.RESPONSE, service, [message.encode(\"utf-8\")]\n        )\n    # Management Interface\n    elif service == b\"mmi.service.broker\":\n        self.mmi_service(sender, command, target, uuid, data)\n    elif service == b\"sid.service.broker\":\n        self.inventory_service(sender, command, target, uuid, data)\n    elif service == b\"fss.service.broker\":\n        self.file_sharing_service(sender, command, target, uuid, data)\n    else:\n        self.dispatch(\n            sender, command, self.require_service(service), target, uuid, data\n        )\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.filter_workers","title":"filter_workers(target, service)","text":"

    Helper function to filter workers

Parameters:

    target (bytes, required) - bytes string, workers target
    service (NFPService, required) - NFPService object

    Source code in norfab\\core\\broker.py
    def filter_workers(self, target: bytes, service: NFPService) -> list:\n    \"\"\"\n    Helper function to filter workers\n\n    :param target: bytest string, workers target\n    :param service: NFPService object\n    \"\"\"\n    ret = []\n    if not service.workers:\n        log.warning(\n            f\"NFPBroker - '{service.name}' has no active workers registered, try later\"\n        )\n        ret = []\n    elif target == b\"any\":\n        ret = [service.workers[random.randint(0, len(service.workers) - 1)]]\n    elif target == b\"all\":\n        ret = service.workers\n    elif target in self.workers:  # single worker\n        ret = [self.workers[target]]\n    else:  # target list of workers\n        try:\n            target = json.loads(target)\n            if isinstance(target, list):\n                for w in target:\n                    w = w.encode(\"utf-8\")\n                    if w in self.workers:\n                        ret.append(self.workers[w])\n                ret = list(set(ret))  # dedup workers\n        except Exception as e:\n            log.error(\n                f\"NFPBroker - Failed to load target '{target}' with error '{e}'\"\n            )\n    return ret\n
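For illustration, the target encodings this helper understands, expressed as plain Python values (a sketch derived from the logic above; the worker names are assumptions):

    import json

    targets = [
        b"any",                             # one randomly chosen worker of the service
        b"all",                             # every worker of the service
        b"nornir-worker-1",                 # a single worker by its address
        json.dumps(
            ["nornir-worker-1", "nornir-worker-2"]
        ).encode("utf-8"),                  # explicit list of workers
    ]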
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.dispatch","title":"dispatch(sender, command, service, target, uuid, data)","text":"

Dispatch requests to waiting workers where possible

Parameters:

    service (required) - service object
    target (required) - string indicating worker addresses to dispatch to
    msg (required) - string with work request content

    Source code in norfab\\core\\broker.py
    def dispatch(self, sender, command, service, target, uuid, data):\n    \"\"\"\n    Dispatch requests to waiting workers as possible\n\n    :param service: service object\n    :param target: string indicating workers addresses to dispatch to\n    :param msg: string with work request content\n    \"\"\"\n    log.debug(\n        f\"NFPBroker - dispatching request to workers: sender '{sender}', \"\n        f\"command '{command}', service '{service.name}', target '{target}'\"\n        f\"data '{data}', uuid '{uuid}'\"\n    )\n    self.purge_workers()\n    workers = self.filter_workers(target, service)\n\n    # handle case when service has no workers registered\n    if not workers:\n        message = f\"NFPBroker - {service.name} service failed to target workers '{target}'\"\n        log.error(message)\n        self.send_to_client(\n            sender,\n            NFP.RESPONSE,\n            service.name,\n            [uuid, b\"400\", message.encode(\"utf-8\")],\n        )\n    else:\n        # inform client that JOB dispatched\n        w_addresses = [w.address.decode(\"utf-8\") for w in workers]\n        self.send_to_client(\n            sender,\n            NFP.RESPONSE,\n            service.name,\n            [\n                uuid,\n                b\"202\",\n                json.dumps(\n                    {\n                        \"workers\": w_addresses,\n                        \"uuid\": uuid.decode(\"utf-8\"),\n                        \"target\": target.decode(\"utf-8\"),\n                        \"status\": \"DISPATCHED\",\n                        \"service\": service.name.decode(\"utf-8\"),\n                    }\n                ).encode(\"utf-8\"),\n            ],\n        )\n        # send job to workers\n        for worker in workers:\n            self.send_to_worker(worker, command, sender, uuid, data)\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.mmi_service","title":"mmi_service(sender, command, target, uuid, data)","text":"

    Handle internal service according to 8/MMI specification

    Source code in norfab\\core\\broker.py
    def mmi_service(self, sender, command, target, uuid, data):\n    \"\"\"Handle internal service according to 8/MMI specification\"\"\"\n    log.debug(\n        f\"mmi.service.broker - processing request: sender '{sender}', \"\n        f\"command '{command}', target '{target}'\"\n        f\"data '{data}', uuid '{uuid}'\"\n    )\n    data = json.loads(data)\n    task = data.get(\"task\")\n    args = data.get(\"args\", [])\n    kwargs = data.get(\"kwargs\", {})\n    ret = f\"Unsupported task '{task}'\"\n    if task == \"show_workers\":\n        if self.workers:\n            ret = [\n                {\n                    \"name\": w.address.decode(\"utf-8\"),\n                    \"service\": w.service.name.decode(\"utf-8\"),\n                    \"status\": \"alive\" if w.keepaliver.is_alive() else \"dead\",\n                    \"holdtime\": str(w.keepaliver.show_holdtime()),\n                    \"keepalives tx/rx\": f\"{w.keepaliver.keepalives_send} / {w.keepaliver.keepalives_received}\",\n                    \"alive (s)\": str(w.keepaliver.show_alive_for()),\n                }\n                for k, w in self.workers.items()\n            ]\n            # filter reply\n            service = kwargs.get(\"service\")\n            status = kwargs.get(\"status\")\n            if service and service != \"all\":\n                ret = [w for w in ret if w[\"service\"] == service]\n            if status in [\"alive\", \"dead\"]:\n                ret = [w for w in ret if w[\"status\"] == status]\n            if not ret:\n                ret = [{\"name\": \"\", \"service\": \"\", \"status\": \"\"}]\n        else:\n            ret = [{\"name\": \"\", \"service\": \"\", \"status\": \"\"}]\n    elif task == \"show_broker\":\n        ret = {\n            \"address\": self.socket.getsockopt_string(zmq.LAST_ENDPOINT),\n            \"status\": \"active\",\n            \"multiplier\": self.multiplier,\n            \"keepalive\": self.keepalive,\n            \"workers count\": len(self.workers),\n            \"services count\": len(self.services),\n            \"base_dir\": self.base_dir,\n        }\n    reply = json.dumps(ret).encode(\"utf-8\")\n    self.send_to_client(\n        sender, NFP.RESPONSE, b\"mmi.service.broker\", [uuid, b\"200\", reply]\n    )\n
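A sketch of the request payload a client could send to mmi.service.broker, based on the task handling above:

    import json

    # List alive workers of the nornir service via the broker management interface
    payload = json.dumps({
        "task": "show_workers",
        "kwargs": {"service": "nornir", "status": "alive"},
    }).encode("utf-8")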
    "},{"location":"norfab_cli_overview/","title":"NORFAB CLI","text":"

The NorFab CLI is designed as a modal operating system. The term modal describes a system that has various modes of operation, each having its own domain of operation. The CLI uses a hierarchical structure for the modes.

You can access a lower-level mode only from a higher-level mode. For example, to access the Nornir mode, you must be in the privileged EXEC mode. Each mode is used to accomplish particular tasks and has a specific set of commands that are available in that mode. For example, to configure a router interface, you must be in the Nornir configuration mode, and all configurations that you enter in configuration mode apply only to that function.

The NorFab CLI is built using the PICLE package.

It is important to remember that in the PICLE Shell, when you enter a command, the command is executed immediately. If you enter an incorrect command in a production environment, it can negatively affect that environment.

    "},{"location":"norfab_exceptions_reference/","title":"Exceptions","text":""},{"location":"norfab_exceptions_reference/#norfab.core.exceptions.UnsupportedPluginError","title":"UnsupportedPluginError","text":"

    Bases: Exception

    Exception raised when the specified plugin is not supported

    "},{"location":"norfab_exceptions_reference/#norfab.core.exceptions.UnsupportedServiceError","title":"UnsupportedServiceError","text":"

    Bases: Exception

    Exception raised when the specified service is not supported
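
    Below is a minimal, hedged sketch of how these exceptions may surface when selecting a plugin for a Nornir service task; the helper function and the unsupported plugin name used here are purely illustrative.

    from norfab.core.exceptions import UnsupportedPluginError\n\ndef pick_cli_plugin(plugin: str) -> str:\n    # illustrative helper, not part of NorFab - mirrors the check done by Nornir service tasks\n    if plugin not in (\"netmiko\", \"scrapli\", \"napalm\"):\n        raise UnsupportedPluginError(f\"Plugin '{plugin}' not supported\")\n    return plugin\n\ntry:\n    pick_cli_plugin(\"telnet\")  # hypothetical unsupported plugin name\nexcept UnsupportedPluginError as e:\n    print(f\"Unsupported plugin requested: {e}\")\n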

    "},{"location":"norfab_getting_started/","title":"Getting Started","text":"

    Once NorFab is installed, the next step is to create a folder that will hold your environment and to start creating inventory files with the required configuration.

    Create a norfab folder and inside of it create an inventory.yaml file with this content:

    inventory.yaml
    broker: # (1)!\n  endpoint: \"tcp://127.0.0.1:5555\" # (2)!\n\nworkers: # (3)!\n  nornir-*: # (4)!\n    - nornir/common.yaml   \n  nornir-worker-1: # (5)!\n    - nornir/nornir-worker-1.yaml\n\ntopology: # (6)!\n  broker: True # (7)!\n  workers: # (8)!\n    - nornir-worker-1\n
    1. Broker configuration section
    2. URL to listen for connections on - localhost port 5555 in this case
    3. Workers configuration section
    4. Glob pattern that will match all workers with nornir- in the name and map the common.yaml content to each such worker
    5. Worker definition that maps an inventory file to the specific worker named nornir-worker-1
    6. Topology section to define what components to run
    7. Start broker process
    8. List of worker names to start processes for

    In this example we are working with the Nornir service.

    Create a nornir folder and inside of it create two files.

    The first file, common.yaml, holds configuration common to all Nornir service workers:

    common.yaml
    service: nornir # (1)!\nbroker_endpoint: \"tcp://127.0.0.1:5555\" # (2)!\n\n# next comes Nornir inventory and configuration\nrunner: # (3)!\n  plugin: RetryRunner\ndefault: {} # (4)!\ngroups: {} # (5)!\n
    1. Name of the service this worker is hosting
    2. Broker URL to initiate connections with
    3. Nornir runner plugin configuration
    4. Nornir default data section
    5. Nornir groups definition section

    The second file is specific to the worker named nornir-worker-1 and holds its Nornir inventory data:

    nornir-worker-1.yaml
    hosts: \n  R1:\n    hostname: r1.lab.local\n    platform: cisco_ios\n    username: developer\n    password: secretpassword\n  R2:\n    hostname: 10.0.0.2\n    platform: cisco_ios\n    username: developer\n    password: secretpassword\n

    This is how the file structure will look:

    \u2514\u2500\u2500\u2500norfab\n    \u2502   inventory.yaml\n    \u2502\n    \u2514\u2500\u2500\u2500nornir\n            common.yaml\n            nornir-worker-1.yaml\n

    Now you are ready to start the NorFab Interactive Command Line Shell Client - NFCLI. Open a terminal window, navigate to the folder where inventory.yaml is located and start NFCLI:

    C:\\>cd norfab\nC:\\norfab>nfcli\nnf#\n

    This will start the NorFab broker process and the Nornir worker process, instantiate the NFCLI client and drop you into an interactive command line shell:

    nf#? # (1)!\n file      File sharing service\n netbox    Netbox service\n nornir    Nornir service\n show      NorFab show commands\n exit      Exit current shell\n help      Print help message\n pwd       Print current shell path\n top       Exit to top shell\nnf#show workers # (2)!\n name             service  status  holdtime  keepalives tx/rx  alive (s)\n nornir-worker-1  nornir   alive   12.8      58 / 58           149\nnf#\nnf#nornir # (3)!\nnf[nornir]#?\n cfg     Configure devices over CLI interface\n cli     Send CLI commands to devices\n show    Show Nornir service parameters\n task    Run Nornir task\n test    Run network tests\n end     Exit application\n exit    Exit current shell\n help    Print help message\n pwd     Print current shell path\n top     Exit to top shell\nnf[nornir]#show hosts\n {\n     \"nornir-worker-1\": [\n         \"R1\",\n         \"R2\"\n     ]\n }\nnf[nornir]# end\nExiting...\n
    1. Question mark plus enter to print commands help
    2. Run show command
    3. Drop into Nornir Service command shell

    The NorFab CLI supports Tab completion and question mark help together with sub-shells; read more about the NorFab CLI and how to use it here.

    That's it

    "},{"location":"norfab_installation/","title":"Installation","text":""},{"location":"norfab_installation/#norfab-core","title":"NorFab Core","text":"

    Install NorFab core from PyPI

    pip install norfab\n
    "},{"location":"norfab_installation/#extras","title":"Extras","text":"

    Several extra installation options are supported, tailoring certain services' dependencies to what you want to run on a given node.

    To install everything, use the full extras:

    pip install norfab[full]\n
    "},{"location":"norfab_installation/#norfab-cli-dependencies","title":"NORFAB CLI Dependencies","text":"

    To install NorFab Interactive CLI dependencies

    pip install norfab[nfcli]\n
    "},{"location":"norfab_installation/#nornir-service-dependencies","title":"Nornir Service Dependencies","text":"

    To install Nornir service dependencies

    pip install norfab[nornir_service]\n
    "},{"location":"norfab_installation/#netbox-service-dependencies","title":"Netbox Service Dependencies","text":"

    To install Netbox service dependencies

    pip install norfab[netbox_service]\n
    "},{"location":"norfab_inventory/","title":"NorFab Inventory","text":"

    NorFab comes with a Simple Inventory Datastore (SID) hosted by the broker.

    "},{"location":"norfab_inventory/#broker-inventory","title":"Broker Inventory","text":"

    TBD

    "},{"location":"norfab_inventory/#workers-inventory","title":"Workers Inventory","text":"

    To understand how the Simple Inventory Datastore serves workers' inventory, it is good to know that each worker has a unique name that identifies it.

    With that in mind, the goal is to map inventory data to individual workers by name.

    For example, suppose the worker name is nornir-worker-1 and we have common.yaml and nornir-worker-1.yaml files with inventory data that we need to provide the worker with.

    To map the worker name to its inventory files, we can put this in the NorFab inventory (inventory.yaml) file:

    workers:\n  nornir-*:\n    - nornir/common.yaml  \n  nornir-worker-1:\n    - nornir/nornir-worker-1.yaml\n

    Where the file structure would look like this:

    \u2514\u2500\u2500\u2500rootfolder\n    \u2502   inventory.yaml\n    \u2502\n    \u2514\u2500\u2500\u2500nornir\n            common.yaml\n            nornir-worker-1.yaml\n

    As you can see, the inventory.yaml file contains a workers section with a dictionary keyed by glob patterns that are matched against workers' names. Once a worker name is matched by a pattern, all items in the list underneath that pattern are loaded and recursively merged, and the process continues until all patterns are evaluated. The final output of the process is the combined inventory data of all the matched files.

    The recursive logic of combining inventory data files is straightforward - each subsequent data file is merged into the previous one, overriding the overlapping values.
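
    As a minimal sketch of this behaviour, using the merge_recursively helper documented in the Simple Inventory reference (the data values here are illustrative):

    from norfab.core.inventory import merge_recursively\n\n# data loaded from nornir/common.yaml - matched first by the \"nornir-*\" pattern\ndata = {\"service\": \"nornir\", \"runner\": {\"plugin\": \"RetryRunner\"}}\n# data loaded from nornir/nornir-worker-1.yaml - matched next by \"nornir-worker-1\"\nmerge = {\"runner\": {\"options\": {\"num_workers\": 100}}, \"hosts\": {\"R1\": {\"platform\": \"cisco_ios\"}}}\n\nmerge_recursively(data, merge)\n# dictionaries are merged recursively, overlapping values overridden by the later file:\n# {\"service\": \"nornir\", \"runner\": {\"plugin\": \"RetryRunner\", \"options\": {\"num_workers\": 100}}, \"hosts\": {\"R1\": {\"platform\": \"cisco_ios\"}}}\n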

    The glob pattern matching logic allows you to be as specific as required: you can map specific files to individual workers, map a single data file to multiple workers, or map multiple files to multiple workers - all combinations are supported.

    For example, say we have a group of two workers named netbox-worker-1.1 and netbox-worker-1.2 and we want to map netbox_common.yaml to both of them. In that case the NorFab inventory (inventory.yaml) file could have this content:

    workers:\n  netbox-worker-1.*:\n    - netbox/netbox_common.yaml  \n

    Where files structure would look like this:

    \u2514\u2500\u2500\u2500rootfolder\n    \u2502   inventory.yaml\n    \u2502\n    \u2514\u2500\u2500\u2500netbox\n            netbox_common.yaml\n

    Both workers will be served the netbox_common.yaml file content as their inventory data.
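
    A quick sketch of the pattern matching idea, assuming for illustration that the glob matching behaves like Python's fnmatch:

    from fnmatch import fnmatchcase\n\npatterns = [\"nornir-*\", \"netbox-worker-1.*\"]  # keys from the workers section\nfor worker_name in [\"nornir-worker-1\", \"netbox-worker-1.1\", \"netbox-worker-1.2\"]:\n    matched = [p for p in patterns if fnmatchcase(worker_name, p)]\n    print(worker_name, \"->\", matched)\n# nornir-worker-1 -> ['nornir-*']\n# netbox-worker-1.1 -> ['netbox-worker-1.*']\n# netbox-worker-1.2 -> ['netbox-worker-1.*']\n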

    "},{"location":"norfab_inventory/#workers-inventory-parameters","title":"Workers Inventory Parameters","text":"

    A worker's inventory can contain these common parameters:

    1. service - name of the service this worker belongs to
    2. broker_endpoint - Broker URL to connect to

    Sample worker base inventory:

    service: nornir\nbroker_endpoint: \"tcp://127.0.0.1:5555\"\n

    The rest of the inventory data is worker specific.

    "},{"location":"norfab_inventory/#topology-inventory","title":"Topology Inventory","text":"

    The topology section of the NorFab inventory identifies the components that need to be started on a given node.

    "},{"location":"norfab_inventory_reference/","title":"Simple Inventory","text":"

    Simple Local Inventory is an inventory plugin to load inventory data from locally stored files.

    Sample inventory file

    broker:\n  endpoint: \"tcp://127.0.0.1:5555\"\n\nworkers:\n  nornir-*:\n    - nornir/common.yaml  \n  nornir-worker-1:\n    - nornir/nornir-worker-1.yaml\n\ntopology:\n  broker: True\n  workers:\n    - nornir-worker-1\n

    where nornir/common.yaml contains

    service: nornir\nbroker_endpoint: \"tcp://127.0.0.1:5555\"\nrunner:\n  plugin: RetryRunner\n  options: \n    num_workers: 100\n    num_connectors: 10\n    connect_retry: 3\n    connect_backoff: 1000\n    connect_splay: 100\n    task_retry: 3\n    task_backoff: 1000\n    task_splay: 100\n    reconnect_on_fail: True\n    task_timeout: 600\n

    and nornir/nornir-worker-1.yaml contains

    hosts: \n  csr1000v-1:\n    hostname: sandbox-1.lab.com\n    platform: cisco_ios\n    username: developer\n    password: secretpassword\n  csr1000v-2:\n    hostname: sandbox-2.lab.com\n    platform: cisco_ios\n    username: developer\n    password: secretpassword\ngroups: {}\ndefaults: {}\n

    Whenever the inventory is queried to provide data for a worker named nornir-worker-1, Simple Inventory iterates over the workers dictionary and recursively merges data for the keys (glob patterns) that match the worker name.

    "},{"location":"norfab_inventory_reference/#norfab.core.inventory.WorkersInventory","title":"WorkersInventory(path, data)","text":"

    Class to collect and serve NorFab workers' inventory data, forming it by recursively merging all data files that are associated with the name of the worker requesting inventory data.

    Parameters:

    Name Type Description Default path str

    OS path to top folder with workers inventory data

    required data dict

    dictionary keyed by glob patterns matching workers names and values being a list of OS paths to files with workers inventory data

    required Source code in norfab\\core\\inventory.py
    def __init__(self, path: str, data: dict) -> None:\n    \"\"\"\n    Class to collect and server NorFab workers inventory data,\n    forming it by recursively merging all data files that associated\n    with the name of worker requesting inventory data.\n\n    :param path: OS path to top folder with workers inventory data\n    :param data: dictionary keyed by glob patterns matching workers names\n        and values being a list of OS paths to files with workers\n        inventory data\n    \"\"\"\n    self.path, _ = os.path.split(path)\n    self.data = data\n
    "},{"location":"norfab_inventory_reference/#norfab.core.inventory.NorFabInventory","title":"NorFabInventory(path)","text":"

    NorFabInventory class to instantiate simple inventory.

    Parameters:

    Name Type Description Default path str

    OS path to YAML file with inventory data

    required Source code in norfab\\core\\inventory.py
    def __init__(self, path: str) -> None:\n    \"\"\"\n    NorFabInventory class to instantiate simple inventory.\n\n    :param path: OS path to YAML file with inventory data\n    \"\"\"\n    self.broker = {}\n    self.workers = {}\n    self.topology = {}\n    path = os.path.abspath(path)\n    self.load(path)\n
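
    A hedged usage sketch, assuming the sample inventory.yaml shown in the Getting Started section (printed values are illustrative):

    from norfab.core.inventory import NorFabInventory\n\ninv = NorFabInventory(path=\"./inventory.yaml\")\n\n# attributes populated from the YAML file\nprint(inv.broker)    # e.g. {\"endpoint\": \"tcp://127.0.0.1:5555\"}\nprint(inv.topology)  # e.g. {\"broker\": True, \"workers\": [\"nornir-worker-1\"]}\n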
    "},{"location":"norfab_inventory_reference/#norfab.core.inventory.merge_recursively","title":"merge_recursively(data, merge)","text":"

    Function to merge two dictionaries' data recursively.

    Parameters:

    Name Type Description Default data dict

    primary dictionary

    required merge dict

    dictionary to merge into primary overriding the content

    required Source code in norfab\\core\\inventory.py
    def merge_recursively(data: dict, merge: dict) -> None:\n    \"\"\"\n    Function to merge two dictionaries data recursively.\n\n    :param data: primary dictionary\n    :param merge: dictionary to merge into primary overriding the content\n    \"\"\"\n    assert isinstance(data, dict) and isinstance(\n        merge, dict\n    ), f\"Only supports dictionary/dictionary data merges, not {type(data)}/{type(merge)}\"\n    for k, v in merge.items():\n        if k in data:\n            # merge two lists\n            if isinstance(data[k], list) and isinstance(v, list):\n                for i in v:\n                    if i not in data[k]:\n                        data[k].append(i)\n            # recursively merge dictionaries\n            elif isinstance(data[k], dict) and isinstance(v, dict):\n                merge_recursively(data[k], v)\n            # rewrite existing value with new data\n            else:\n                data[k] = v\n        else:\n            data[k] = v\n
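
    As a small sketch of the list handling shown above, overlapping lists are combined without duplicating items, while scalar values are overridden (the data values are illustrative):

    from norfab.core.inventory import merge_recursively\n\ndata = {\"commands\": [\"show version\"], \"timeout\": 60}\nmerge = {\"commands\": [\"show version\", \"show clock\"], \"timeout\": 120}\n\nmerge_recursively(data, merge)\nprint(data)  # {\"commands\": [\"show version\", \"show clock\"], \"timeout\": 120}\n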
    "},{"location":"norfab_nfapi_reference/","title":"NFAPI (Python API)","text":"

    Utility class to implement Python API for interfacing with NorFab.

    NorFab Python API Client initialization class

    from norfab.core.nfapi import NorFab\n\nnf = NorFab(inventory=inventory)\nnf.start(start_broker=True, workers=[\"my-worker-1\"])\nNFCLIENT = nf.client\n

    Parameters:

    Name Type Description Default inventory str

    OS path to NorFab inventory YAML file

    './inventory.yaml' log_level str

    one of the supported logging levels - CRITICAL, ERROR, WARNING, INFO, DEBUG

    'WARNING' Source code in norfab\\core\\nfapi.py
    def __init__(\n    self, inventory: str = \"./inventory.yaml\", log_level: str = \"WARNING\"\n) -> None:\n    \"\"\"\n    NorFab Python API Client initialization class\n\n    ```\n    from norfab.core.nfapi import NorFab\n\n    nf = NorFab(inventory=inventory)\n    nf.start(start_broker=True, workers=[\"my-worker-1\"])\n    NFCLIENT = nf.client\n    ```\n\n    :param inventory: OS path to NorFab inventory YAML file\n    :param log_level: one or supported logging levels - `CRITICAL`, `ERROR`, `WARNING`, `INFO`, `DEBUG`\n    \"\"\"\n    self.inventory = NorFabInventory(inventory)\n    self.log_level = log_level\n    self.broker_endpoint = self.inventory.get(\"broker\", {}).get(\"endpoint\")\n    self.broker_exit_event = Event()\n    self.workers_exit_event = Event()\n    self.clients_exit_event = Event()\n
    "},{"location":"norfab_nfapi_reference/#norfab.core.nfapi.NorFab.start","title":"start(start_broker=None, workers=None)","text":"

    Main entry method to start NorFab components.

    Parameters:

    Name Type Description Default start_broker bool

    if True, starts broker process

    None workers list

    list of worker names to start processes for

    None Source code in norfab\\core\\nfapi.py
    def start(\n    self,\n    start_broker: bool = None,\n    workers: list = None,\n):\n    \"\"\"\n    Main entry method to start NorFab components.\n\n    :param start_broker: if True, starts broker process\n    :param workers: list of worker names to start processes for\n    \"\"\"\n    if workers is None:\n        workers = self.inventory.topology.get(\"workers\", [])\n    if start_broker is None:\n        start_broker = self.inventory.topology.get(\"broker\", False)\n\n    # form a list of workers to start\n    workers_to_start = set()\n    for worker_name in workers:\n        if isinstance(worker_name, dict):\n            worker_name = tuple(worker_name)[0]\n        workers_to_start.add(worker_name)\n\n    # start the broker\n    if start_broker is True:\n        self.start_broker()\n\n    # start all the workers\n    while workers_to_start != set(self.workers_processes.keys()):\n        for worker in workers:\n            # extract worker name and data/params\n            if isinstance(worker, dict):\n                worker_name = tuple(worker)[0]\n                worker_data = worker[worker_name]\n            else:\n                worker_name = worker\n                worker_data = {}\n            # verify if need to start this worker\n            if worker_name not in workers_to_start:\n                continue\n            # start worker\n            try:\n                self.start_worker(worker_name, worker_data)\n            # if failed to start remove from workers to start\n            except KeyError:\n                workers_to_start.remove(worker_name)\n                log.error(\n                    f\"'{worker_name}' - failed to start worker, no inventory data found\"\n                )\n            except FileNotFoundError as e:\n                workers_to_start.remove(worker_name)\n                log.error(\n                    f\"'{worker_name}' - failed to start worker, inventory file not found '{e}'\"\n                )\n            except Exception as e:\n                workers_to_start.remove(worker_name)\n                log.error(f\"'{worker_name}' - failed to start worker, error '{e}'\")\n\n        time.sleep(0.01)\n\n    # make the API client\n    self.make_client()\n
    "},{"location":"norfab_nfapi_reference/#norfab.core.nfapi.NorFab.destroy","title":"destroy()","text":"

    Stop NORFAB processes.

    Source code in norfab\\core\\nfapi.py
    def destroy(self) -> None:\n    \"\"\"\n    Stop NORFAB processes.\n    \"\"\"\n    # stop client\n    self.clients_exit_event.set()\n    self.client.destroy()\n    # stop workers\n    self.workers_exit_event.set()\n    while self.workers_processes:\n        _, w = self.workers_processes.popitem()\n        w[\"process\"].join()\n    # stop broker\n    self.broker_exit_event.set()\n    if self.broker:\n        self.broker.join()\n
    "},{"location":"norfab_nfapi_reference/#norfab.core.nfapi.NorFab.make_client","title":"make_client(broker_endpoint=None)","text":"

    Make an instance of NorFab client

    Parameters:

    Name Type Description Default broker_endpoint str

    (str), Broker URL to connect with

    None Source code in norfab\\core\\nfapi.py
    def make_client(self, broker_endpoint: str = None) -> NFPClient:\n    \"\"\"\n    Make an instance of NorFab client\n\n    :param broker_endpoint: (str), Broker URL to connect with\n    \"\"\"\n\n    if broker_endpoint or self.broker_endpoint:\n        client = NFPClient(\n            broker_endpoint or self.broker_endpoint,\n            \"NFPClient\",\n            self.log_level,\n            self.clients_exit_event,\n        )\n        if self.client is None:  # own the first client\n            self.client = client\n        return client\n    else:\n        log.error(\"Failed to make client, no broker endpoint defined\")\n        return None\n
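
    A hedged sketch of creating an additional client instance alongside the one NorFab owns; the job parameters are illustrative:

    from norfab.core.nfapi import NorFab\n\nnf = NorFab(inventory=\"./inventory.yaml\")\nnf.start()\n\nextra_client = nf.make_client()  # uses the broker endpoint from inventory\nif extra_client is not None:\n    result = extra_client.run_job(\n        service=\"nornir\",\n        task=\"cli\",\n        kwargs={\"commands\": [\"show clock\"]},\n    )\n\nnf.destroy()\n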
    "},{"location":"norfab_python_api_overview/","title":"NORFAB Python API","text":"

    The NorFab Python API exists to run the automations fabric; the components that need to be started are defined in the inventory.yaml file. To start working with NorFab, you need to import the core object and instantiate it.

    from norfab.core.nfapi import NorFab\n\nnf = NorFab(inventory=\"./inventory.yaml\")\nnf.start()\nnf.destroy()\n

    Refer to the Getting Started section for how to construct the inventory.yaml file.

    All interaction with NorFab happens via a client. On NorFab start, an instance of a local client is created automatically and can be used to submit jobs:

    import pprint\nfrom norfab.core.nfapi import NorFab\n\nnf = NorFab(inventory=\"./inventory.yaml\")\nnf.start()\n\nresult = nf.client.run_job(\n    service=\"nornir\",\n    task=\"cli\",\n    kwargs={\"commands\": [\"show version\", \"show clock\"]}\n)\n\npprint.pprint(result)\n\nnf.destroy()\n
    "},{"location":"nornir_service/","title":"Nornir Service","text":"

    Nornir Service is built on the Nornir library, a widely adopted open-source tool for automating network operations.

    With each Nornir worker capable of handling multiple devices simultaneously, Nornir Service offers high scalability, allowing efficient management of large device fleets. By optimizing compute resources such as CPU, RAM, and storage, it delivers cost-effective performance.

    Additionally, Nornir Service supports various interfaces and libraries for seamless integration. For instance, the cli task can interact with devices via the Command Line Interface (CLI) using popular libraries like Netmiko, Scrapli, and NAPALM, providing flexibility for diverse network environments.
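
    For example, a minimal sketch of calling the Nornir service cli task through the Python API, selecting the CLI plugin explicitly (the commands and inventory path are illustrative):

    from norfab.core.nfapi import NorFab\n\nnf = NorFab(inventory=\"./inventory.yaml\")\nnf.start()\n\nresult = nf.client.run_job(\n    service=\"nornir\",\n    task=\"cli\",\n    kwargs={\n        \"commands\": [\"show version\"],\n        \"plugin\": \"scrapli\",  # one of netmiko, scrapli, napalm\n    },\n)\n\nnf.destroy()\n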

    "},{"location":"nornir_worker_api_reference/","title":"Nornir Worker","text":""},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker--nornir-worker-inventory-reference","title":"Nornir Worker Inventory Reference","text":"
    • watchdog_interval - watchdog run interval in seconds, default is 30
    • connections_idle_timeout - watchdog connection idle timeout, default is None - no timeout, connections are always kept alive; if set to 0, connections are disconnected immediately after a task completes; if set to a positive number, a connection is disconnected after not being used for over connections_idle_timeout seconds (see the sketch below)
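
    A minimal sketch of these keys as the Python data they load into from the worker's inventory file; the values are illustrative:

    # Nornir worker inventory after YAML load - watchdog related keys only\nworker_inventory = {\n    \"service\": \"nornir\",\n    \"watchdog_interval\": 30,           # run watchdog tasks every 30 seconds (default)\n    \"connections_idle_timeout\": 120,   # close connections idle for more than 120 seconds\n}\n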
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.WatchDog","title":"WatchDog(worker)","text":"

    Bases: WorkerWatchDog

    Class to monitor Nornir worker performance

    Source code in norfab\\workers\\nornir_worker.py
    def __init__(self, worker):\n    super().__init__(worker)\n    self.worker = worker\n    self.connections_idle_timeout = worker.inventory.get(\n        \"connections_idle_timeout\", None\n    )\n    self.connections_data = {}  # store connections use timestamps\n    self.started_at = time.time()\n\n    # stats attributes\n    self.idle_connections_cleaned = 0\n    self.dead_connections_cleaned = 0\n\n    # list of tasks for watchdog to run in given order\n    self.watchdog_tasks = [\n        self.connections_clean,\n        self.connections_keepalive,\n    ]\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.WatchDog.connections_update","title":"connections_update(nr, plugin)","text":"

    Function to update connection use timestamps for each host

    Parameters:

    Name Type Description Default nr

    Nornir object

    required plugin str

    connection plugin name

    required Source code in norfab\\workers\\nornir_worker.py
    def connections_update(self, nr, plugin: str) -> None:\n    \"\"\"\n    Function to update connection use timestamps for each host\n\n    :param nr: Nornir object\n    :param plugin: connection plugin name\n    \"\"\"\n    conn_stats = {\n        \"last_use\": None,\n        \"last_keepealive\": None,\n        \"keepalive_count\": 0,\n    }\n    for host_name in nr.inventory.hosts:\n        self.connections_data.setdefault(host_name, {})\n        self.connections_data[host_name].setdefault(plugin, conn_stats.copy())\n        self.connections_data[host_name][plugin][\"last_use\"] = time.ctime()\n    log.info(\n        f\"{self.worker.name} - updated connections use timestamps for '{plugin}'\"\n    )\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.WatchDog.connections_clean","title":"connections_clean()","text":"

    Check whether idle connections need to be torn down - those not being used for over connections_idle_timeout

    Source code in norfab\\workers\\nornir_worker.py
    def connections_clean(self):\n    \"\"\"\n    Check if need to tear down connections that are idle -\n    not being used over connections_idle_timeout\n    \"\"\"\n    # dictionary keyed by plugin name and value as a list of hosts\n    disconnect = {}\n    if not self.worker.connections_lock.acquire(blocking=False):\n        return\n    try:\n        # if idle timeout not set, connections don't age out\n        if self.connections_idle_timeout is None:\n            disconnect = {}\n        # disconnect all connections for all hosts\n        elif self.connections_idle_timeout == 0:\n            disconnect = {\"all\": list(self.connections_data.keys())}\n        # only disconnect aged/idle connections\n        elif self.connections_idle_timeout > 0:\n            for host_name, plugins in self.connections_data.items():\n                for plugin, conn_data in plugins.items():\n                    last_use = time.mktime(time.strptime(conn_data[\"last_use\"]))\n                    age = time.time() - last_use\n                    if age > self.connections_idle_timeout:\n                        disconnect.setdefault(plugin, [])\n                        disconnect[plugin].append(host_name)\n        # run task to disconnect connections for aged hosts\n        for plugin, hosts in disconnect.items():\n            if not hosts:\n                continue\n            aged_hosts = FFun(self.worker.nr, FL=hosts)\n            aged_hosts.run(task=nr_connections, call=\"close\", conn_name=plugin)\n            log.debug(\n                f\"{self.worker.name} watchdog, disconnected '{plugin}' \"\n                f\"connections for '{', '.join(hosts)}'\"\n            )\n            self.idle_connections_cleaned += len(hosts)\n            # wipe out connections data if all connection closed\n            if plugin == \"all\":\n                self.connections_data = {}\n                break\n            # remove disconnected plugin from host's connections_data\n            for host in hosts:\n                self.connections_data[host].pop(plugin)\n                if not self.connections_data[host]:\n                    self.connections_data.pop(host)\n    except Exception as e:\n        msg = f\"{self.worker.name} - watchdog failed to close idle connections, error: {e}\"\n        log.error(msg)\n    finally:\n        self.worker.connections_lock.release()\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.WatchDog.connections_keepalive","title":"connections_keepalive()","text":"

    Keepalive connections and clean up dead connections if any

    Source code in norfab\\workers\\nornir_worker.py
    def connections_keepalive(self):\n    \"\"\"Keepalive connections and clean up dead connections if any\"\"\"\n    if self.connections_idle_timeout == 0:  # do not keepalive if idle is 0\n        return\n    if not self.worker.connections_lock.acquire(blocking=False):\n        return\n    try:\n        log.debug(f\"{self.worker.name} - watchdog running connections keepalive\")\n        stats = HostsKeepalive(self.worker.nr)\n        self.dead_connections_cleaned += stats[\"dead_connections_cleaned\"]\n        # update connections statistics\n        for plugins in self.connections_data.values():\n            for plugin in plugins.values():\n                plugin[\"last_keepealive\"] = time.ctime()\n                plugin[\"keepalive_count\"] += 1\n    except Exception as e:\n        msg = f\"{self.worker.name} - watchdog HostsKeepalive check error: {e}\"\n        log.error(msg)\n    finally:\n        self.worker.connections_lock.release()\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker","title":"NornirWorker(broker, service, worker_name, exit_event=None, init_done_event=None, log_level='WARNING')","text":"

    Bases: NFPWorker

    Parameters:

    Name Type Description Default broker str

    broker URL to connect to

    required service str

    name of the service with worker belongs to

    required worker_name str

    name of this worker

    required exit_event

    if set, the worker needs to stop/exit

    None init_done_event

    event to set when worker done initializing

    None log_level str

    logging level of this worker

    'WARNING' Source code in norfab\\workers\\nornir_worker.py
    def __init__(\n    self,\n    broker: str,\n    service: str,\n    worker_name: str,\n    exit_event=None,\n    init_done_event=None,\n    log_level: str = \"WARNING\",\n):\n    super().__init__(broker, service, worker_name, exit_event, log_level)\n    self.init_done_event = init_done_event\n    self.tf_base_path = os.path.join(self.base_dir, \"tf\")\n\n    # misc attributes\n    self.connections_lock = Lock()\n\n    # get inventory from broker\n    self.inventory = self.load_inventory()\n\n    # pull Nornir inventory from Netbox\n    self._pull_netbox_inventory()\n\n    # initiate Nornir\n    self._init_nornir()\n\n    # initiate watchdog\n    self.watchdog = WatchDog(self)\n    self.watchdog.start()\n\n    self.init_done_event.set()\n    log.info(f\"{self.name} - Started\")\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.render_jinja2_templates","title":"render_jinja2_templates(templates, context, filters=None)","text":"

    helper function to render a list of Jinja2 templates

    Parameters:

    Name Type Description Default templates list[str]

    list of template strings to render

    required context dict

    Jinja2 context dictionary

    required filters

    custom Jinja2 filters

    required

    Returns:

    Type Description list[str]

    list of rendered strings

    Source code in norfab\\workers\\nornir_worker.py
    def render_jinja2_templates(\n    self, templates: list[str], context: dict, filters: dict = None\n) -> list[str]:\n    \"\"\"\n    helper function to render a list of Jinja2 templates\n\n    :param templates: list of template strings to render\n    :param context: Jinja2 context dictionary\n    :param filter: custom Jinja2 filters\n    :returns: list of rendered strings\n    \"\"\"\n    rendered = []\n    filters = filters or {}\n    for template in templates:\n        if template.startswith(\"nf://\"):\n            filepath = self.fetch_jinja2(template)\n            searchpath, filename = os.path.split(filepath)\n            j2env = Environment(loader=FileSystemLoader(searchpath))\n            renderer = j2env.get_template(filename)\n        else:\n            j2env = Environment(loader=\"BaseLoader\")\n            renderer = j2env.from_string(template)\n        j2env.filters.update(filters)  # add custom filters\n        rendered.append(renderer.render(**context))\n\n    return rendered\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.load_job_data","title":"load_job_data(job_data)","text":"

    Helper function to download job data and load it using YAML

    Parameters:

    Name Type Description Default job_data str

    URL to job data

    required Source code in norfab\\workers\\nornir_worker.py
    def load_job_data(self, job_data: str):\n    \"\"\"\n    Helper function to download job data and load it using YAML\n\n    :param job_data: URL to job data\n    \"\"\"\n    if self.is_url(job_data):\n        job_data = self.fetch_file(job_data)\n        if job_data is None:\n            msg = f\"{self.name} - '{job_data}' job data file download failed\"\n            raise FileNotFoundError(msg)\n        job_data = yaml.safe_load(job_data)\n\n    return job_data\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.get_nornir_hosts","title":"get_nornir_hosts(details=False, **kwargs)","text":"

    Produce a list of hosts managed by this worker

    Parameters:

    Name Type Description Default kwargs dict

    dictionary of nornir-salt Fx filters

    {} Source code in norfab\\workers\\nornir_worker.py
    def get_nornir_hosts(self, details: bool = False, **kwargs: dict) -> list:\n    \"\"\"\n    Produce a list of hosts managed by this worker\n\n    :param kwargs: dictionary of nornir-salt Fx filters\n    \"\"\"\n    filters = {k: kwargs.pop(k) for k in list(kwargs.keys()) if k in FFun_functions}\n    filtered_nornir = FFun(self.nr, **filters)\n    if details:\n        return Result(\n            result={\n                host_name: {\n                    \"platform\": str(host.platform),\n                    \"hostname\": str(host.hostname),\n                    \"port\": str(host.port),\n                    \"groups\": [str(g) for g in host.groups],\n                    \"username\": str(host.username),\n                }\n                for host_name, host in filtered_nornir.inventory.hosts.items()\n            }\n        )\n    else:\n        return Result(result=list(filtered_nornir.inventory.hosts))\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.get_nornir_inventory","title":"get_nornir_inventory(**kwargs)","text":"

    Retrieve running Nornir inventory for requested hosts

    Parameters:

    Name Type Description Default kwargs dict

    dictionary of nornir-salt Fx filters

    {} Source code in norfab\\workers\\nornir_worker.py
    def get_nornir_inventory(self, **kwargs: dict) -> dict:\n    \"\"\"\n    Retrieve running Nornir inventory for requested hosts\n\n    :param kwargs: dictionary of nornir-salt Fx filters\n    \"\"\"\n    filters = {k: kwargs.pop(k) for k in list(kwargs.keys()) if k in FFun_functions}\n    filtered_nornir = FFun(self.nr, **filters)\n    return Result(\n        result=filtered_nornir.inventory.dict(), task=\"get_nornir_inventory\"\n    )\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.get_nornir_version","title":"get_nornir_version()","text":"

    Produce Python packages version report

    Source code in norfab\\workers\\nornir_worker.py
    def get_nornir_version(self):\n    \"\"\"\n    Produce Python packages version report\n    \"\"\"\n    libs = {\n        \"scrapli\": \"\",\n        \"scrapli-netconf\": \"\",\n        \"scrapli-community\": \"\",\n        \"paramiko\": \"\",\n        \"netmiko\": \"\",\n        \"napalm\": \"\",\n        \"nornir\": \"\",\n        \"ncclient\": \"\",\n        \"nornir-netmiko\": \"\",\n        \"nornir-napalm\": \"\",\n        \"nornir-scrapli\": \"\",\n        \"nornir-utils\": \"\",\n        \"tabulate\": \"\",\n        \"xmltodict\": \"\",\n        \"puresnmp\": \"\",\n        \"pygnmi\": \"\",\n        \"pyyaml\": \"\",\n        \"jmespath\": \"\",\n        \"jinja2\": \"\",\n        \"ttp\": \"\",\n        \"nornir-salt\": \"\",\n        \"lxml\": \"\",\n        \"ttp-templates\": \"\",\n        \"ntc-templates\": \"\",\n        \"cerberus\": \"\",\n        \"pydantic\": \"\",\n        \"requests\": \"\",\n        \"textfsm\": \"\",\n        \"N2G\": \"\",\n        \"dnspython\": \"\",\n        \"pythonping\": \"\",\n        \"python\": sys.version.split(\" \")[0],\n        \"platform\": sys.platform,\n    }\n    # get version of packages installed\n    for pkg in libs.keys():\n        try:\n            libs[pkg] = importlib.metadata.version(pkg)\n        except importlib.metadata.PackageNotFoundError:\n            pass\n\n    return Result(result=libs)\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.task","title":"task(plugin, **kwargs)","text":"

    Function to invoke any of the supported Nornir task plugins. This function performs a dynamic import of the requested plugin function and executes nr.run using the supplied args and kwargs

    The plugin attribute can refer to a file to fetch from the file service. The file must contain a function named task accepting a Nornir task object as its first positional argument, for example:

    # define connection name for RetryRunner to properly detect it\nCONNECTION_NAME = \"netmiko\"\n\n# create task function\ndef task(nornir_task_object, *args, **kwargs):\n    pass\n

    CONNECTION_NAME

    CONNECTION_NAME must be defined within the custom task function file if RetryRunner is in use; otherwise the connection retry logic is skipped and connections to all hosts are initiated simultaneously, up to the number of num_workers.

    Parameters:

    Name Type Description Default plugin str

    (str) path.to.plugin.task_fun to import or nf://path/to/task.py to download custom task

    required kwargs

    (dict) arguments to use with specified task plugin

    {} Source code in norfab\\workers\\nornir_worker.py
    def task(self, plugin: str, **kwargs) -> Result:\n    \"\"\"\n    Function to invoke any of supported Nornir task plugins. This function\n    performs dynamic import of requested plugin function and executes\n    ``nr.run`` using supplied args and kwargs\n\n    ``plugin`` attribute can refer to a file to fetch from file service. File must contain\n    function named ``task`` accepting Nornir task object as a first positional\n    argument, for example:\n\n    ```python\n    # define connection name for RetryRunner to properly detect it\n    CONNECTION_NAME = \"netmiko\"\n\n    # create task function\n    def task(nornir_task_object, *args, **kwargs):\n        pass\n    ```\n\n    !!! note \"CONNECTION_NAME\"\n\n        ``CONNECTION_NAME`` must be defined within custom task function file if\n        RetryRunner in use, otherwise connection retry logic skipped and connections\n        to all hosts initiated simultaneously up to the number of ``num_workers``.\n\n    :param plugin: (str) ``path.to.plugin.task_fun`` to import or ``nf://path/to/task.py``\n        to download custom task\n    :param kwargs: (dict) arguments to use with specified task plugin\n    \"\"\"\n    # extract attributes\n    add_details = kwargs.pop(\"add_details\", False)  # ResultSerializer\n    to_dict = kwargs.pop(\"to_dict\", True)  # ResultSerializer\n    filters = {k: kwargs.pop(k) for k in list(kwargs.keys()) if k in FFun_functions}\n    ret = Result(task=f\"{self.name}:task\", result={} if to_dict else [])\n\n    # download task from broker and load it\n    if plugin.startswith(\"nf://\"):\n        function_text = self.fetch_file(plugin)\n        if function_text is None:\n            raise FileNotFoundError(\n                f\"{self.name} - '{plugin}' task plugin download failed\"\n            )\n\n        # load task function running exec\n        globals_dict = {}\n        exec(function_text, globals_dict, globals_dict)\n        task_function = globals_dict[\"task\"]\n    # import task function\n    else:\n        # below same as \"from nornir.plugins.tasks import task_fun as task_function\"\n        task_fun = plugin.split(\".\")[-1]\n        module = __import__(plugin, fromlist=[\"\"])\n        task_function = getattr(module, task_fun)\n\n    self.nr.data.reset_failed_hosts()  # reset failed hosts\n    filtered_nornir = FFun(self.nr, **filters)  # filter hosts\n\n    # check if no hosts matched\n    if not filtered_nornir.inventory.hosts:\n        msg = (\n            f\"{self.name} - nothing to do, no hosts matched by filters '{filters}'\"\n        )\n        log.debug(msg)\n        ret.messages.append(msg)\n        return ret\n\n    nr = self._add_processors(filtered_nornir, kwargs)  # add processors\n\n    # run task\n    log.debug(f\"{self.name} - running Nornir task '{plugin}', kwargs '{kwargs}'\")\n    with self.connections_lock:\n        result = nr.run(task=task_function, **kwargs)\n    ret.result = ResultSerializer(result, to_dict=to_dict, add_details=add_details)\n\n    self.watchdog.connections_clean()\n\n    return ret\n
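
    A hedged sketch of calling the task function through the client, assuming a started NorFab instance nf (see the Python API section) and a hypothetical custom task file path on the file service:

    result = nf.client.run_job(\n    service=\"nornir\",\n    task=\"task\",\n    kwargs={\n        \"plugin\": \"nf://tasks/my_custom_task.py\",  # hypothetical path on the file service\n        \"add_details\": True,\n    },\n)\n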
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.cli","title":"cli(commands=None, plugin='netmiko', cli_dry_run=False, run_ttp=None, job_data=None, to_dict=True, add_details=False, **kwargs)","text":"

    Function to collect show commands output from devices using Command Line Interface (CLI)

    Parameters:

    Name Type Description Default commands list

    list of commands to send to devices

    None plugin str

    plugin name to use - netmiko, scrapli, napalm

    'netmiko' cli_dry_run bool

    do not send commands to devices, just return them

    False job_data str

    URL to YAML file with data or dictionary/list of data to pass on to Jinja2 rendering context

    None add_details bool

    if True will add task execution details to the results

    False to_dict bool

    default is True - produces dictionary results, if False will produce results list

    True run_ttp str

    TTP Template to run

    None Source code in norfab\\workers\\nornir_worker.py
    def cli(\n    self,\n    commands: list = None,\n    plugin: str = \"netmiko\",\n    cli_dry_run: bool = False,\n    run_ttp: str = None,\n    job_data: str = None,\n    to_dict: bool = True,\n    add_details: bool = False,\n    **kwargs,\n) -> dict:\n    \"\"\"\n    Function to collect show commands output from devices using\n    Command Line Interface (CLI)\n\n    :param commands: list of commands to send to devices\n    :param plugin: plugin name to use - ``netmiko``, ``scrapli``, ``napalm``\n    :param cli_dry_run: do not send commands to devices just return them\n    :param job_data: URL to YAML file with data or dictionary/list of data\n        to pass on to Jinja2 rendering context\n    :param add_details: if True will add task execution details to the results\n    :param to_dict: default is True - produces dictionary results, if False\n        will produce results list\n    :param run_ttp: TTP Template to run\n    \"\"\"\n    job_data = job_data or {}\n    filters = {k: kwargs.pop(k) for k in list(kwargs.keys()) if k in FFun_functions}\n    downloaded_cmds = []\n    timeout = self.current_job[\"timeout\"] * 0.9\n    ret = Result(task=f\"{self.name}:cli\", result={} if to_dict else [])\n\n    # decide on what send commands task plugin to use\n    if plugin == \"netmiko\":\n        task_plugin = netmiko_send_commands\n        if kwargs.get(\"use_ps\"):\n            kwargs.setdefault(\"timeout\", timeout)\n        else:\n            kwargs.setdefault(\"read_timeout\", timeout)\n    elif plugin == \"scrapli\":\n        task_plugin = scrapli_send_commands\n        kwargs.setdefault(\"timeout_ops\", timeout)\n    elif plugin == \"napalm\":\n        task_plugin = napalm_send_commands\n    else:\n        raise UnsupportedPluginError(f\"Plugin '{plugin}' not supported\")\n\n    self.nr.data.reset_failed_hosts()  # reset failed hosts\n    filtered_nornir = FFun(self.nr, **filters)  # filter hosts\n\n    # check if no hosts matched\n    if not filtered_nornir.inventory.hosts:\n        msg = (\n            f\"{self.name} - nothing to do, no hosts matched by filters '{filters}'\"\n        )\n        log.debug(msg)\n        ret.messages.append(msg)\n        return ret\n\n    # download TTP template\n    if self.is_url(run_ttp):\n        downloaded = self.fetch_file(run_ttp)\n        kwargs[\"run_ttp\"] = downloaded\n        if downloaded is None:\n            msg = f\"{self.name} - TTP template download failed '{run_ttp}'\"\n            raise FileNotFoundError(msg)\n    # use TTP template as is - inline template or ttp://xyz path\n    elif run_ttp:\n        kwargs[\"run_ttp\"] = run_ttp\n\n    # download job data\n    job_data = self.load_job_data(job_data)\n\n    nr = self._add_processors(filtered_nornir, kwargs)  # add processors\n\n    # render commands using Jinja2 on a per-host basis\n    if commands:\n        commands = commands if isinstance(commands, list) else [commands]\n        for host in nr.inventory.hosts.values():\n            rendered = self.render_jinja2_templates(\n                templates=commands,\n                context={\n                    \"host\": host,\n                    \"norfab\": self.client,\n                    \"nornir\": self,\n                    \"job_data\": job_data,\n                },\n            )\n            host.data[\"__task__\"] = {\"commands\": rendered}\n\n    # run task\n    log.debug(\n        f\"{self.name} - running cli commands '{commands}', kwargs '{kwargs}', is cli dry run - '{cli_dry_run}'\"\n    )\n    if cli_dry_run is True:\n    
    result = nr.run(\n            task=nr_test, use_task_data=\"commands\", name=\"cli_dry_run\", **kwargs\n        )\n    else:\n        with self.connections_lock:\n            result = nr.run(task=task_plugin, **kwargs)\n\n    ret.result = ResultSerializer(result, to_dict=to_dict, add_details=add_details)\n\n    # remove __task__ data\n    for host_name, host_object in nr.inventory.hosts.items():\n        _ = host_object.data.pop(\"__task__\", None)\n\n    self.watchdog.connections_update(nr, plugin)\n    self.watchdog.connections_clean()\n\n    return ret\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.nb_get_next_ip","title":"nb_get_next_ip(*args, **kwargs)","text":"

    Method to query the next available IP address from the Netbox service

    Source code in norfab\\workers\\nornir_worker.py
    def nb_get_next_ip(self, *args, **kwargs):\n    \"\"\"Method to query next available IP address from Netbox service\"\"\"\n    reply = self.client.run_job(\n        \"netbox\",\n        \"get_next_ip\",\n        args=args,\n        kwargs=kwargs,\n        workers=\"any\",\n        timeout=30,\n    )\n    # reply is a dict of {worker_name: results_dict}\n    result = list(reply.values())[0]\n\n    return result[\"result\"]\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.cfg","title":"cfg(config, plugin='netmiko', cfg_dry_run=False, to_dict=True, add_details=False, job_data=None, **kwargs)","text":"

    Function to send configuration commands to devices using Command Line Interface (CLI)

    Parameters:

    Name Type Description Default config list

    list of commands to send to devices

    required plugin str

    plugin name to use - netmiko, scrapli, napalm

    'netmiko' cfg_dry_run bool

    do not send commands to devices, just return them

    False job_data str

    URL to YAML file with data or dictionary/list of data to pass on to Jinja2 rendering context

    None add_details bool

    if True will add task execution details to the results

    False to_dict bool

    default is True - produces dictionary results, if False will produce results list

    True Source code in norfab\\workers\\nornir_worker.py
    def cfg(\n    self,\n    config: list,\n    plugin: str = \"netmiko\",\n    cfg_dry_run: bool = False,\n    to_dict: bool = True,\n    add_details: bool = False,\n    job_data: str = None,\n    **kwargs,\n) -> dict:\n    \"\"\"\n    Function to send configuration commands to devices using\n    Command Line Interface (CLI)\n\n    :param config: list of commands to send to devices\n    :param plugin: plugin name to use - ``netmiko``, ``scrapli``, ``napalm``\n    :param cfg_dry_run: do not send commands to devices just return them\n    :param job_data: URL to YAML file with data or dictionary/list of data\n        to pass on to Jinja2 rendering context\n    :param add_details: if True will add task execution details to the results\n    :param to_dict: default is True - produces dictionary results, if False\n        will produce results list\n    \"\"\"\n    downloaded_cfg = []\n    config = config if isinstance(config, list) else [config]\n    filters = {k: kwargs.pop(k) for k in list(kwargs.keys()) if k in FFun_functions}\n    ret = Result(task=f\"{self.name}:cfg\", result={} if to_dict else [])\n    timeout = self.current_job[\"timeout\"]\n\n    # decide on what send commands task plugin to use\n    if plugin == \"netmiko\":\n        task_plugin = netmiko_send_config\n    elif plugin == \"scrapli\":\n        task_plugin = scrapli_send_config\n    elif plugin == \"napalm\":\n        task_plugin = napalm_configure\n    else:\n        raise UnsupportedPluginError(f\"Plugin '{plugin}' not supported\")\n\n    self.nr.data.reset_failed_hosts()  # reset failed hosts\n    filtered_nornir = FFun(self.nr, **filters)  # filter hosts\n\n    # check if no hosts matched\n    if not filtered_nornir.inventory.hosts:\n        msg = (\n            f\"{self.name} - nothing to do, no hosts matched by filters '{filters}'\"\n        )\n        ret.messages.append(msg)\n        log.debug(msg)\n        return ret\n\n    job_data = self.load_job_data(job_data)\n\n    nr = self._add_processors(filtered_nornir, kwargs)  # add processors\n\n    # render config using Jinja2 on a per-host basis\n    for host in nr.inventory.hosts.values():\n        rendered = self.render_jinja2_templates(\n            templates=config,\n            context={\n                \"host\": host,\n                \"norfab\": self.client,\n                \"nornir\": self,\n                \"job_data\": job_data,\n            },\n            filters={\"nb_get_next_ip\": self.nb_get_next_ip},\n        )\n        host.data[\"__task__\"] = {\"config\": rendered}\n\n    # run task\n    log.debug(\n        f\"{self.name} - sending config commands '{config}', kwargs '{kwargs}', is cfg_dry_run - '{cfg_dry_run}'\"\n    )\n    if cfg_dry_run is True:\n        result = nr.run(\n            task=nr_test, use_task_data=\"config\", name=\"cfg_dry_run\", **kwargs\n        )\n    else:\n        with self.connections_lock:\n            result = nr.run(task=task_plugin, **kwargs)\n        ret.changed = True\n\n    ret.result = ResultSerializer(result, to_dict=to_dict, add_details=add_details)\n\n    # remove __task__ data\n    for host_name, host_object in nr.inventory.hosts.items():\n        _ = host_object.data.pop(\"__task__\", None)\n\n    self.watchdog.connections_update(nr, plugin)\n    self.watchdog.connections_clean()\n\n    return ret\n
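
    A hedged usage sketch, assuming a started NorFab instance nf; cfg_dry_run is used here to preview the rendered configuration without pushing it to devices (the configuration line is illustrative):

    result = nf.client.run_job(\n    service=\"nornir\",\n    task=\"cfg\",\n    kwargs={\n        \"config\": [\"logging host 10.0.0.1\"],  # illustrative configuration line\n        \"plugin\": \"netmiko\",\n        \"cfg_dry_run\": True,\n    },\n)\n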
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.test","title":"test(suite, subset=None, dry_run=False, remove_tasks=True, failed_only=False, return_tests_suite=False, job_data=None, **kwargs)","text":"

    Function to test data obtained from devices.

    Parameters:

    Name Type Description Default suite Union[list, str]

    path to YAML file with tests

    required dry_run bool

    if True, returns produced per-host tests suite content only

    False subset str

    list or string with comma-separated, non-case-sensitive glob patterns to filter tests by name; the subset argument is ignored by dry run

    None failed_only bool

    if True returns test results for failed tests only

    False remove_tasks bool

    if False, results will include other tasks' output

    True return_tests_suite bool

    if True returns rendered per-host tests suite content in addition to test results using dictionary with results and suite keys

    False job_data str

    URL to YAML file with data or dictionary/list of data to pass on to Jinja2 rendering context

    None kwargs

    any additional arguments to pass on to Nornir service task

    {} Source code in norfab\\workers\\nornir_worker.py
    def test(\n    self,\n    suite: Union[list, str],\n    subset: str = None,\n    dry_run: bool = False,\n    remove_tasks: bool = True,\n    failed_only: bool = False,\n    return_tests_suite: bool = False,\n    job_data: str = None,\n    **kwargs,\n) -> dict:\n    \"\"\"\n    Function to tests data obtained from devices.\n\n    :param suite: path to YAML file with tests\n    :param dry_run: if True, returns produced per-host tests suite content only\n    :param subset: list or string with comma separated non case sensitive glob\n        patterns to filter tests' by name, subset argument ignored by dry run\n    :param failed_only: if True returns test results for failed tests only\n    :param remove_tasks: if False results will include other tasks output\n    :param return_tests_suite: if True returns rendered per-host tests suite\n        content in addition to test results using dictionary with ``results``\n        and ``suite`` keys\n    :param job_data: URL to YAML file with data or dictionary/list of data\n        to pass on to Jinja2 rendering context\n    :param kwargs: any additional arguments to pass on to Nornir service task\n    \"\"\"\n    downloaded_suite = None\n    tests = {}  # dictionary to hold per-host test suites\n    add_details = kwargs.get(\"add_details\", False)  # ResultSerializer\n    to_dict = kwargs.get(\"to_dict\", True)  # ResultSerializer\n    filters = {k: kwargs.pop(k) for k in list(kwargs.keys()) if k in FFun_functions}\n    ret = Result(task=f\"{self.name}:test\", result={} if to_dict else [])\n    suites = {}  # dictionary to hold combined test suites\n\n    self.nr.data.reset_failed_hosts()  # reset failed hosts\n    filtered_nornir = FFun(self.nr, **filters)  # filter hosts\n\n    # check if no hosts matched\n    if not filtered_nornir.inventory.hosts:\n        msg = (\n            f\"{self.name} - nothing to do, no hosts matched by filters '{filters}'\"\n        )\n        log.debug(msg)\n        ret.messages.append(msg)\n        if return_tests_suite is True:\n            ret.result = {\"test_results\": [], \"suite\": {}}\n        return ret\n\n    # download tests suite\n    downloaded_suite = self.fetch_jinja2(suite)\n\n    # download job data\n    job_data = self.load_job_data(job_data)\n\n    # generate per-host test suites\n    searchpath, template = os.path.split(downloaded_suite)\n    for host_name, host in filtered_nornir.inventory.hosts.items():\n        context = {\n            \"host\": host,\n            \"norfab\": self.client,\n            \"nornir\": self,\n            \"job_data\": job_data,\n        }\n        # render suite using Jinja2\n        try:\n            j2env = Environment(loader=FileSystemLoader(searchpath))\n            renderer = j2env.get_template(template)\n            rendered_suite = renderer.render(**context)\n        except Exception as e:\n            msg = f\"{self.name} - '{suite}' Jinja2 rendering failed: '{e}'\"\n            raise RuntimeError(msg)\n        # load suit using YAML\n        try:\n            tests[host_name] = yaml.safe_load(rendered_suite)\n        except Exception as e:\n            msg = f\"{self.name} - '{suite}' YAML load failed: '{e}'\"\n            raise RuntimeError(msg)\n\n    # validate tests suite\n    try:\n        _ = modelTestsProcessorSuite(tests=tests)\n    except Exception as e:\n        msg = f\"{self.name} - '{suite}' suite validation failed: '{e}'\"\n        raise RuntimeError(msg)\n\n    # download pattern, schema and custom function files\n    for host_name in 
tests.keys():\n        for index, item in enumerate(tests[host_name]):\n            for k in [\"pattern\", \"schema\", \"function_file\"]:\n                if self.is_url(item.get(k)):\n                    item[k] = self.fetch_file(\n                        item[k], raise_on_fail=True, read=True\n                    )\n                    if k == \"function_file\":\n                        item[\"function_text\"] = item.pop(k)\n            tests[host_name][index] = item\n\n    # save per-host tests suite content before mutating it\n    if return_tests_suite is True:\n        return_suite = copy.deepcopy(tests)\n\n    log.debug(f\"{self.name} - running test '{suite}', is dry run - '{dry_run}'\")\n    # run dry run task\n    if dry_run is True:\n        result = filtered_nornir.run(\n            task=nr_test, name=\"tests_dry_run\", ret_data_per_host=tests\n        )\n        ret.result = ResultSerializer(\n            result, to_dict=to_dict, add_details=add_details\n        )\n    # combine per-host tests in suites based on task task and arguments\n    # Why - to run tests using any nornir service tasks with various arguments\n    else:\n        for host_name, host_tests in tests.items():\n            for test in host_tests:\n                dhash = hashlib.md5()\n                test_args = test.pop(\"norfab\", {})\n                nrtask = test_args.get(\"nrtask\", \"cli\")\n                assert nrtask in [\n                    \"cli\",\n                    \"network\",\n                    \"cfg\",\n                    \"task\",\n                ], f\"{self.name} - unsupported NorFab Nornir Service task '{nrtask}'\"\n                test_json = json.dumps(test_args, sort_keys=True).encode()\n                dhash.update(test_json)\n                test_hash = dhash.hexdigest()\n                suites.setdefault(test_hash, {\"params\": test_args, \"tests\": {}})\n                suites[test_hash][\"tests\"].setdefault(host_name, [])\n                suites[test_hash][\"tests\"][host_name].append(test)\n        log.debug(\n            f\"{self.name} - combined per-host tests suites based on NorFab Nornir Service task and arguments:\\n{suites}\"\n        )\n        # run test suites collecting output from devices\n        for tests_suite in suites.values():\n            nrtask = tests_suite[\"params\"].pop(\"nrtask\", \"cli\")\n            function_kwargs = {\n                **tests_suite[\"params\"],\n                **kwargs,\n                **filters,\n                \"tests\": tests_suite[\"tests\"],\n                \"remove_tasks\": remove_tasks,\n                \"failed_only\": failed_only,\n                \"subset\": subset,\n            }\n            result = getattr(self, nrtask)(\n                **function_kwargs\n            )  # returns Result object\n            # save test results into overall results\n            if to_dict == True:\n                for host_name, host_res in result.result.items():\n                    ret.result.setdefault(host_name, {})\n                    ret.result[host_name].update(host_res)\n            else:\n                ret.result.extend(result.result)\n\n    # check if need to return tests suite content\n    if return_tests_suite is True:\n        ret.result = {\"test_results\": ret.result, \"suite\": return_suite}\n\n    return ret\n
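
    A hedged usage sketch, assuming a started NorFab instance nf and a hypothetical tests suite file stored on the file service:

    result = nf.client.run_job(\n    service=\"nornir\",\n    task=\"test\",\n    kwargs={\n        \"suite\": \"nf://tests/suite_1.txt\",  # hypothetical path to YAML tests suite\n        \"failed_only\": True,\n    },\n)\n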
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.network","title":"network(fun, **kwargs)","text":"

    Function to call various network related utility functions.

    Parameters:

    • fun (str) - utility function name to call. Required.
    • kwargs (dict) - function arguments. Default: {}

    Available utility functions:

    • resolve_dns - resolves hosts' hostname via DNS, returning IP addresses using the nornir_salt.plugins.tasks.network.resolve_dns Nornir-Salt function.
    • ping - executes ICMP ping to host using the nornir_salt.plugins.tasks.network.ping Nornir-Salt function.

    {} Source code in norfab\\workers\\nornir_worker.py
    def network(self, fun, **kwargs) -> dict:\n    \"\"\"\n    Function to call various network related utility functions.\n\n    :param fun: (str) utility function name to call\n    :param kwargs: (dict) function arguments\n\n    Available utility functions.\n\n    **resolve_dns** function\n\n    resolves hosts' hostname DNS returning IP addresses using\n    ``nornir_salt.plugins.tasks.network.resolve_dns`` Nornir-Salt\n    function.\n\n    **ping** function\n\n    Function to execute ICMP ping to host using\n    ``nornir_salt.plugins.tasks.network.ping`` Nornir-Salt\n    function.\n    \"\"\"\n    kwargs[\"call\"] = fun\n    return self.task(\n        plugin=\"nornir_salt.plugins.tasks.network\",\n        **kwargs,\n    )\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.parse","title":"parse(plugin='napalm', getters='get_facts', template=None, commands=None, to_dict=True, add_details=False, **kwargs)","text":"

    Function to parse network devices show commands output

    Parameters:

    • plugin (str) - plugin name to use - napalm, textfsm, ttp. Default: 'napalm'
    • getters (str) - NAPALM getters to use. Default: 'get_facts'
    • commands (list) - commands to send to devices for TextFSM or TTP template. Default: None
    • template (str) - TextFSM or TTP parsing template string or path to file. Default: None

    For NAPALM plugin method can refer to a list of getters names.

    Source code in norfab\\workers\\nornir_worker.py
    def parse(\n    self,\n    plugin: str = \"napalm\",\n    getters: str = \"get_facts\",\n    template: str = None,\n    commands: list = None,\n    to_dict: bool = True,\n    add_details: bool = False,\n    **kwargs,\n):\n    \"\"\"\n    Function to parse network devices show commands output\n\n    :param plugin: plugin name to use - ``napalm``, ``textfsm``, ``ttp``\n    :param getters: NAPALM getters to use\n    :param commands: commands to send to devices for TextFSM or TTP template\n    :param template: TextFSM or TTP parsing template string or path to file\n\n    For NAPALM plugin ``method`` can refer to a list of getters names.\n    \"\"\"\n    filters = {k: kwargs.pop(k) for k in list(kwargs.keys()) if k in FFun_functions}\n    ret = Result(task=f\"{self.name}:parse\", result={} if to_dict else [])\n\n    self.nr.data.reset_failed_hosts()  # reset failed hosts\n    filtered_nornir = FFun(self.nr, **filters)  # filter hosts\n\n    # check if no hosts matched\n    if not filtered_nornir.inventory.hosts:\n        msg = (\n            f\"{self.name} - nothing to do, no hosts matched by filters '{filters}'\"\n        )\n        ret.messages.append(msg)\n        log.debug(msg)\n        return ret\n\n    if plugin == \"napalm\":\n        nr = self._add_processors(filtered_nornir, kwargs)  # add processors\n        result = nr.run(task=napalm_get, getters=getters, **kwargs)\n        ret.result = ResultSerializer(\n            result, to_dict=to_dict, add_details=add_details\n        )\n    elif plugin == \"ttp\":\n        result = self.cli(\n            commands=commands or [],\n            run_ttp=template,\n            **filters,\n            **kwargs,\n            to_dict=to_dict,\n            add_details=add_details,\n            plugin=\"netmiko\",\n        )\n        ret.result = result.result\n    elif plugin == \"textfsm\":\n        result = self.cli(\n            commands=commands,\n            **filters,\n            **kwargs,\n            to_dict=to_dict,\n            add_details=add_details,\n            use_textfsm=True,\n            textfsm_template=template,\n            plugin=\"netmiko\",\n        )\n        ret.result = result.result\n    else:\n        raise UnsupportedPluginError(f\"Plugin '{plugin}' not supported\")\n\n    return ret\n
    "}]} \ No newline at end of file +{"config":{"lang":["en"],"separator":"[\\s\\-]+","pipeline":["stopWordFilter"]},"docs":[{"location":"","title":"Overview","text":"

    Through lifting others we rise

    "},{"location":"#network-automations-fabric","title":"Network Automations Fabric","text":"

    Hi, thank you for getting here.

    "},{"location":"#why-the-story","title":"Why (The Story)","text":"

    In a world devoid of network automations, the streets were silent and grey. Without network automations, network engineers' lives became a grueling cycle of manual configurations and endless troubleshooting. They spent hours accessing each and every device, manually configuring and patching systems. Nights were sleepless, filled with frantic calls to resolve outages that could no longer be preemptively detected or resolved. Collaboration was slow, relying on face-to-face meetings and manual documentation. Overwhelmed and exhausted, their innovative spirit was stifled by the sheer volume of repetitive tasks...

    Let us introduce you to the world of Network Automations Fabric.

    "},{"location":"#what-the-idea","title":"What (The Idea)","text":"

    NorFab's purpose is to augment engineers' capabilities with automation superpowers.

    Most software for managing networks falls into one of two categories:

    • heavyweight platforms running on dedicated infrastructure
    • lightweight scripts or tools developed and run locally

    NorFab can be both - software you can run equally well from your laptop or on a server, centralized or fully distributed, lightweight and feature rich. It is capable of handling virtually any use case without the need to throw gazillions of dollars and man-hours at it, always ready to serve its purpose: unlocking engineers' superpowers to manage modern networks and make their lives better.

    "},{"location":"#how-the-features","title":"How (The Features)","text":"
    • Run Anywhere - locally on Windows, macOS or Linux, in a container, on a VM, in the cloud, centralized or distributed
    • Extend Anything - extensibility is at the core of NorFab
    • Integrate with Everything - Python API, REST API and CLI northbound interfaces
    • Manage Anything - develop your own services or use the built-in ones to manage your network infrastructure
    • Model and data driven - Pydantic models for API, validation and documentation
    • Automate Anything - we mean it, the sky is the limit on what you can do with NorFab automating your networks
    "},{"location":"#architecture","title":"Architecture","text":"

    Key actors of the system include

    • WORKERS - form services, processes that run anywhere and act as resource proxy agents
    • CLIENTS - consume services, processes that run on client machine and connect to broker
    • BROKER - provides access to services for clients
    • RESOURCES - entities managed by workers, e.g. network devices, databases, file system
    • SERVICES - a collection of workers and managed resources

    Clients communicate with the broker to submit jobs, the broker distributes jobs across the workers comprising the service, and workers run the jobs, producing results that are later retrieved by clients. In other words, Services are hosted by Workers and expose functionality consumed by Clients via the Broker.

    "},{"location":"NFP/","title":"NORFAB Protocol","text":"

    Status: experimental Editor: d.mulyalin@gmail.com Contributors:

    The NORFAB Protocol (NFP) defines a reliable service-oriented request-reply dialog between a set of client applications, a broker and a set of worker applications representing a service that manages a set of resources.

    NFP covers presence, heartbeating, and service-resource-oriented request-reply processing. NFP originated from the MDP pattern defined in Chapter 4 of the ZeroMQ Guide, combined with the TSP pattern (developed in the same chapter) approach to persistent, disk-based reliable messaging across a network of arbitrarily connected clients and workers. NORFAB allows clients and workers to operate without being connected to the network at the same time, and defines handshaking for safe storage of requests and retrieval of replies.

    "},{"location":"NFP/#license","title":"License","text":"

    Copyright (c) 2024 Denis Mulyalin.

    This Specification is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.

    This Specification is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along with this program; if not, see http://www.gnu.org/licenses.

    "},{"location":"NFP/#change-process","title":"Change Process","text":"

    This Specification is a free and open standard (see \u201cDefinition of a Free and Open Standard\") and is governed by the Digital Standards Organization\u2019s Consensus-Oriented Specification System (COSS) (see \u201cConsensus Oriented Specification System\").

    "},{"location":"NFP/#language","title":"Language","text":"

    The key words \u201cMUST\u201d, \u201cMUST NOT\u201d, \u201cREQUIRED\u201d, \u201cSHALL\u201d, \u201cSHALL NOT\u201d, \u201cSHOULD\u201d, \u201cSHOULD NOT\u201d, \u201cRECOMMENDED\u201d, \u201cMAY\u201d, and \u201cOPTIONAL\u201d in this document are to be interpreted as described in RFC 2119 (see \u201cKey words for use in RFCs to Indicate Requirement Levels\").

    "},{"location":"NFP/#goals","title":"Goals","text":"

    The NORFAB Protocol (NFP) defines a reliable service-resource-oriented request-reply dialog between a set of client applications, a broker and a set of worker applications. NFP covers presence, heartbeating, and service-oriented request-reply processing.

    NFP uses name-based service resolution, name-based resource targeting and structured protocol commands.

    The goals of NFP are to:

    • Allow requests to be routed to workers on the basis of abstract service names.
    • Allow broker and workers to detect disconnection of one another, through the use of heartbeating.
    • Allow task distribution by clients targeting all workers (broadcast), any worker (anycast) or certain workers by name (unicast) within a given service.
    • Allow the broker to recover from dead or disconnected workers by re-sending requests to other workers.
    • Allow workers to manage resource entities, where entities can be dynamically distributed across all workers within the service.
    • Allow workers to have access to inventory data hosted by the broker.
    "},{"location":"NFP/#architecture","title":"Architecture","text":""},{"location":"NFP/#overall-topology","title":"Overall Topology","text":"

    NFP connects a set of client applications, a single broker device and a pool of worker applications. Clients connect to the broker, as do workers. Clients and workers do not see each other, and both can come and go arbitrarily. The broker MAY open two sockets (ports), one front-end for clients, and one back-end for workers. However, NFP is also designed to work over a single broker socket.

    We define \u2018client\u2019 applications as those issuing requests, and \u2018worker\u2019 applications as those processing them. NFP makes these assumptions:

    • Workers are idempotent, i.e. it is safe to execute the same request more than once.
    • Workers will handle at most one request at a time, and will issue exactly one reply for each successful request.
    • The NORFAB broker mediates requests on a per-service basis. The broker SHOULD serve clients on a fair basis and SHOULD deliver requests to workers based on the targeting specified by the client - any worker, all workers or a unicast worker identified by name.

    NFP consists of four sub-protocols:

    • NFP/Client, which covers how the NFP broker communicates with client applications.
    • NFP/Worker, which covers how the NFP broker communicates with worker applications.
    • NFP/Worker-PUB, which covers how broker subscribes to events published by workers.
    • NFP/Broker-PUB, which covers how broker publishes collected worker events to clients.

    The broker SHOULD be an intermediary (device) application that mediates Client-Worker communication. The broker SHOULD integrate a Management Interface (MMI) service directly into it, together with a simple disk-based Inventory service for workers.

    "},{"location":"NFP/#router-addressing","title":"ROUTER Addressing","text":"

    The broker MUST use a ROUTER socket to accept requests from clients, and connections from workers. The broker MAY use a separate socket for each sub-protocol, or MAY use a single socket for both sub-protocols.

    From the \u00d8MQ Reference Manual:

    When receiving messages a ROUTER socket shall prepend a message part containing the identity of the originating peer to the message before passing it to the application. When sending messages a ROUTER socket shall remove the first part of the message and use it to determine the identity of the peer the message shall be routed to.

    This extra frame is not shown in the sub-protocol commands explained below.
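    To make the ROUTER behaviour above concrete, here is a minimal sketch assuming pyzmq; the endpoint is illustrative and the echo reply is purely for demonstration:

    import zmq

    ctx = zmq.Context()
    broker = ctx.socket(zmq.ROUTER)
    broker.bind("tcp://127.0.0.1:5555")

    frames = broker.recv_multipart()       # ROUTER prepended the originating peer identity
    identity, *payload = frames

    # to route a reply back, the identity frame must lead the outgoing message;
    # ROUTER strips it before sending to the peer
    broker.send_multipart([identity, *payload])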

    "},{"location":"NFP/#nfp-messages","title":"NFP messages","text":""},{"location":"NFP/#open","title":"OPEN","text":"

    An OPEN command consists of 4 frames, formatted on the wire as follows:

    OPEN command\n---------------------------------------------------------------\nFrame 0: Empty frame\nFrame 1: \u201cNFPC01\u201d or \u201cNFPW01\u201d or \u201cNFPB01\u201d (six bytes, representing NFP/Client or NFP/Worker or NFP/Broker v0.1)\nFrame 2: 0x00 (one byte, representing OPEN)\nFrame 3: Open body (opaque binary)\n

    Workers and clients use the OPEN message to introduce themselves to the broker and negotiate connection parameters. The broker sends an OPEN message back to the client or worker to confirm the connection.

    "},{"location":"NFP/#ready","title":"READY","text":"

    A READY command consists of a multipart message of 4 frames, formatted on the wire as follows:

    READY command\n---------------------------------------------------------------\nFrame 0: Empty frame\nFrame 1: \u201cNFPW01\u201d (six bytes, representing NFP/Worker v0.1)\nFrame 2: 0x01 (one byte, representing READY)\nFrame 3: Service name (printable string)\n

    The worker sends a READY command to the broker; the broker accepts the request and registers the worker with a service.

    "},{"location":"NFP/#keepalive","title":"KEEPALIVE","text":"

    A KEEPALIVE command consists of 4 frames, formatted on the wire as follows:

    KEEPALIVE command\n---------------------------------------------------------------\nFrame 0: Empty frame\nFrame 1: \u201cNFPB01\u201d or \u201cNFPW01\u201d (six bytes, representing NFP/Broker or NFP/Worker v0.1)\nFrame 2: 0x02 (one byte, representing KEEPALIVE)\nFrame 3: Service name (printable string)\n

    Broker sends KEEPALIVE messages to workers to indicate the broker is still alive.

    Workers send KEEPALIVE messages to the broker to indicate the worker is still alive.

    "},{"location":"NFP/#disconnect","title":"DISCONNECT","text":"

    A DISCONNECT command consists of 5 frames, formatted on the wire as follows:

    DISCONNECT command\n---------------------------------------------------------------\nFrame 0: Empty frame\nFrame 1: \u201cNFPB01\u201d or \u201cNFPW01\u201d (six bytes, representing NFP/Broker or NFP/Worker v0.1)\nFrame 2: 0x03 (one byte, representing DISCONNECT)\nFrame 3: Service name (printable string)\nFrame 4: Disconnect body (opaque binary)\n

    Broker sends DISCONNECT command to workers to signal a request to disconnect.

    Workers can also send DISCONNECT command to the broker to signal a request to disconnect.

    "},{"location":"NFP/#post","title":"POST","text":"

    A POST command consists of 7 or more frames, formatted on the wire as follows:

    POST command\n---------------------------------------------------------------\nFrame 0: Empty (zero bytes, invisible to REQ application)\nFrame 1: \u201cNFPC01\u201d or \"NFPB01\" (six bytes, representing NFP/Client or NFP/Broker v0.1)\nFrame 2: 0x04 (one byte, representing POST)\nFrame 3: Service name (printable string)\nFrame 4: Target (printable string) workers, `all` (default), `any` or comma separated `worker names`\nFrame 5: Job UUID (printable string)\nFrames 6: POST body (opaque binary)\n

    Client sends POST message to broker to distribute job requests among workers.

    Broker relays POST message to individual workers to publish job request.
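    A minimal client-side sketch of building the POST frames listed above, assuming pyzmq; the service name, target and body are illustrative, and the REQ socket silently adds the empty Frame 0:

    import json
    import uuid
    import zmq

    ctx = zmq.Context()
    client = ctx.socket(zmq.REQ)
    client.connect("tcp://127.0.0.1:5555")

    client.send_multipart([
        b"NFPC01",                             # Frame 1: NFP/Client v0.1
        b"\x04",                               # Frame 2: POST
        b"nornir",                             # Frame 3: service name
        b"any",                                # Frame 4: target workers
        uuid.uuid4().hex.encode(),             # Frame 5: job UUID
        json.dumps({"task": "cli"}).encode(),  # Frame 6: POST body (opaque binary, illustrative)
    ])

    reply = client.recv_multipart()            # broker RESPONSE frames arrive here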

    "},{"location":"NFP/#response","title":"RESPONSE","text":"

    A RESPONSE command consists of 7 or more frames, formatted on the wire as follows:

    RESPONSE command\n---------------------------------------------------------------\nFrame 0: Empty (zero bytes, invisible to REQ application)\nFrame 1: \u201cNFPB01\u201d or \u201cNFPW01\u201d (six bytes, representing NFP/Broker or NFP/Worker v0.1)\nFrame 2: 0x05 (one byte, representing RESPONSE)\nFrame 3: Service name (printable string)\nFrame 4: Job UUID (printable string)\nFrame 5: Status code (explained below)\nFrames 6: Response body (opaque binary)\n

    Worker sends RESPONSE message to broker with requests status or job results.

    Broker relays RESPONSE message to client.

    "},{"location":"NFP/#get","title":"GET","text":"

    A GET command consists of 7 or more frames, formatted on the wire as follows:

    GET command\n---------------------------------------------------------------\nFrame 0: Empty (zero bytes, invisible to REQ application)\nFrame 1: \u201cNFPC01\u201d or \"NFPB01\" (six bytes, representing NFP/Client or NFP/Broker v0.1)\nFrame 2: 0x06 (one byte, representing GET)\nFrame 3: Service name (printable string)\nFrame 4: Target (printable string) workers, `all` (default), `any` or comma separated `worker names`\nFrame 5: Job UUID (printable string)\nFrames 6: GET request body (opaque binary)\n

    Client sends GET message to broker to retrieve job results.

    Broker relays GET message to individual workers to request job results.

    "},{"location":"NFP/#delete","title":"DELETE","text":"

    A DELETE command consists of 7 or more frames, formatted on the wire as follows:

    DELETE command\n---------------------------------------------------------------\nFrame 0: Empty (zero bytes, invisible to REQ application)\nFrame 1: \u201cNFPC01\u201d or \"NFPB01\" (six bytes, representing NFP/Client or NFP/Broker v0.1)\nFrame 2: 0x07 (one byte, representing DELETE)\nFrame 3: Service name (printable string)\nFrame 4: Target (printable string) workers, `all` (default), `any` or comma separated `worker names`\nFrame 5: Job UUID (printable string)\nFrames 6: DELETE body (opaque binary)\n

    Client sends DELETE message to broker to distribute job delete requests to workers.

    Broker relays DELETE message to individual workers to cancel the job.

    "},{"location":"NFP/#event","title":"EVENT","text":"

    An EVENT command consists of 7 or more frames, formatted on the wire as follows:

    EVENT command\n---------------------------------------------------------------\nFrame 0: Empty (zero bytes, invisible to REQ application)\nFrame 1: \u201cNFPW01\u201d (six bytes, representing NFP/Worker v0.1)\nFrame 2: 0x08 (one byte, representing EVENT)\nFrame 3: Service name (printable string)\nFrame 4: Topic (printable string e.g. Job UUID)\nFrame 5: Status code 200 (explained below)\nFrames 6: Event body (opaque binary)\n

    Worker sends EVENT message to Broker to supply information about job execution.

    Broker relays EVENT message to the relevant Client.

    "},{"location":"NFP/#status-frames","title":"Status Frames","text":"

    Every RESPONSE message contains a status frame followed by zero or more content frames. The status frame contains a string formatted as three digits, optionally followed by a space and descriptive text. A client MUST NOT treat the text as significant in any way. Implementations MAY NOT use status codes that are not defined here:

    • 200 - OK. The NORFAB worker executed the request successfully.
    • 202 - ACCEPTED. The NORFAB Broker accepted POST request to dispatch the job.
    • 300 - PENDING. The client SHOULD retry the request at a later time.
    • 400 - UNKNOWN. The client is using an invalid or unknown UUID and SHOULD NOT retry.
    • 408 - REQUEST TIMEOUT. Client did not receive response from broker or worker.
    • 417 - EXPECT FAILED. Client did not receive what it was expecting to receive.
    • 500 - ERROR. The server cannot complete the request due to some internal error. The client SHOULD retry at some later time.
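    For illustration only, the status codes above can be kept in a small lookup so a client can interpret the status frame of a RESPONSE; this helper is not part of the specification:

    NFP_STATUS = {
        "200": "OK",
        "202": "ACCEPTED",
        "300": "PENDING",
        "400": "UNKNOWN",
        "408": "REQUEST TIMEOUT",
        "417": "EXPECT FAILED",
        "500": "ERROR",
    }

    def describe_status(status_frame: bytes) -> str:
        # the status frame is three digits, optionally followed by a space and text
        code = status_frame.decode().split(" ", 1)[0]
        return NFP_STATUS.get(code, "UNDEFINED")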

    "},{"location":"NFP/#nfpclient","title":"NFP/Client","text":"

    NFP/Client is a strictly synchronous dialog initiated by the client (where \u2018C\u2019 represents the client, and \u2018B\u2019 represents the broker):

    C: OPEN\nB: OPEN\n\nRepeat:\n\n    C: POST\n    B: RESPONSE\n    ...\n\n    C: GET\n    B: RESPONSE\n    ...\n

    Clients SHOULD use a REQ socket when implementing a synchronous request-reply pattern. The REQ socket will silently create frame 0 for outgoing requests, and remove it for replies before passing them to the calling application.

    Clients MAY use any suitable strategy for recovering from a non-responsive broker. One recommended strategy, sketched in code after the list below, is:

    • To use polling instead of blocking receives on the request socket.
    • If there is no reply within some timeout, to close the request socket and open a new socket, and resend the request on that new socket.
    • If there is no reply after several retries, to signal the transaction as failed.
    • The service name is a 0MQ string that matches the service name specified by a worker in its READY command (see NFP/Worker below). The broker SHOULD queue client requests for a service for which no workers have been registered and SHOULD expire these requests after a reasonable and configurable time if no workers register for that service.
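    A minimal sketch of the recommended recovery strategy, assuming pyzmq; the endpoint, timeout and retry count are illustrative:

    import zmq

    ENDPOINT, TIMEOUT_MS, RETRIES = "tcp://127.0.0.1:5555", 2500, 3

    def send_with_retries(ctx: zmq.Context, frames: list) -> list:
        for _ in range(RETRIES):
            client = ctx.socket(zmq.REQ)
            client.connect(ENDPOINT)
            client.send_multipart(frames)

            poller = zmq.Poller()
            poller.register(client, zmq.POLLIN)
            if poller.poll(TIMEOUT_MS):        # poll instead of a blocking receive
                reply = client.recv_multipart()
                client.close(linger=0)
                return reply

            client.close(linger=0)             # no reply: discard the socket and resend
        raise RuntimeError("transaction failed after retries")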
    "},{"location":"NFP/#nfpbroker","title":"NFP/Broker","text":"

    NFP/Broker is a mediator that receives messages from clients and dispatches them out to workers. In return, messages from workers are routed back to clients.

    "},{"location":"NFP/#nfpworker","title":"NFP/Worker","text":"

    NFP/Worker is a mix of a synchronous request-reply dialog, initiated by the service worker, and an asynchronous heartbeat dialog that operates independently in both directions. This is the synchronous dialog (where \u2018W\u2019 represents the service worker, and \u2018B\u2019 represents the broker):

    W: OPEN\nB: OPEN\nW: READY\n\nRepeat:\n\n    B: POST\n    W: RESPONSE\n    ...\n\n    B: GET\n    W: RESPONSE\n    ... \n

    The asynchronous heartbeat dialog operates on the same sockets and works thus:

    Repeat:                 Repeat:\n\n    W: HEARTBEAT            B: HEARTBEAT\n    ...                     ...\n\nW: DISCONNECT           B: DISCONNECT\n

    NFP/Worker commands all start with an empty frame to allow consistent processing of client and worker frames in a broker, over a single socket. The empty frame has no other significance.

    "},{"location":"NFP/#nfpworker-pub","title":"NFP/Worker-PUB","text":"

    TBD

    "},{"location":"NFP/#nfpbroker-pub","title":"NFP/Broker-PUB","text":"

    TBD

    "},{"location":"NFP/#job-persistence","title":"Job Persistence","text":"

    Workers SHOULD persistently store job requests and job execution results for a configurable amount of time, allowing clients (the client that submitted the job request or any other client) to request job execution results on demand.

    Clients SHOULD persistently store job requests and MAY store job execution results locally for a configurable amount of time.

    "},{"location":"NFP/#opening-and-closing-a-connection","title":"Opening and Closing a Connection","text":"

    The worker is responsible for opening and closing a logical connection. One worker MUST connect to exactly one broker using a single \u00d8MQ DEALER (XREQ) socket.

    Since \u00d8MQ automatically reconnects peers after a failure, every NFP command includes the protocol header to allow proper validation of all messages that a peer receives.

    The worker opens the connection to the broker by creating a new socket, connecting it, and then sending a READY command to register to a service. One worker handles precisely one service, and many workers MAY handle the same service. The worker MUST NOT send a further READY.

    There is no response to a READY. The worker SHOULD assume the registration succeeded until or unless it receives a DISCONNECT, or it detects a broker failure through heartbeating.

    The worker MAY send DISCONNECT at any time, including before READY. When the broker receives DISCONNECT from a worker it MUST send no further commands to that worker.

    The broker MAY send DISCONNECT at any time, by definition after it has received at least one command from the worker.

    The broker MUST respond to any valid but unexpected command by sending DISCONNECT and then no further commands to that worker. The broker SHOULD respond to invalid messages by dropping them and treating that peer as invalid.

    When the worker receives DISCONNECT it must send no further commands to the broker; it MUST close its socket, and reconnect to the broker on a new socket. This mechanism allows workers to re-register after a broker failure and recovery.

    "},{"location":"NFP/#post-and-response-processing","title":"POST and RESPONSE Processing","text":"

    The POST and the RESPONSE commands MUST contain precisely one client address frame. This frame MUST be followed by an empty (zero sized) frame.

    The address of each directly connected client is prepended by the ROUTER socket to all request messages coming from clients. That ROUTER socket also expects a client address to be prepended to each reply message sent to a client.

    "},{"location":"NFP/#keepaliving","title":"Keepaliving","text":"

    KEEPALIVE commands are valid at any time, after a READY command.

    Any received command except DISCONNECT acts as a keepalive. Peers SHOULD NOT send KEEPALIVE commands while also sending other commands.

    Both broker and worker MUST send heartbeats at regular and agreed-upon intervals. A peer MUST consider the other peer \u201cdisconnected\u201d if no keepalive arrives within some multiple of that interval (usually 3-5).

    If the worker detects that the broker has disconnected, it SHOULD restart a new conversation.

    If the broker detects that the worker has disconnected, it SHOULD stop sending messages of any type to that worker.
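    A minimal sketch of the liveness rule above; the interval and multiplier values are illustrative:

    import time

    KEEPALIVE_INTERVAL = 2.5   # seconds, set to similar values on broker and worker
    LIVENESS_MULTIPLIER = 3    # "some multiple of that interval (usually 3-5)"

    class PeerLiveness:
        def __init__(self):
            self.expires_at = time.time() + KEEPALIVE_INTERVAL * LIVENESS_MULTIPLIER

        def seen(self):
            # call on any received command except DISCONNECT - they all act as keepalives
            self.expires_at = time.time() + KEEPALIVE_INTERVAL * LIVENESS_MULTIPLIER

        def disconnected(self) -> bool:
            return time.time() > self.expires_at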

    "},{"location":"NFP/#broker-management-interface-bmmi","title":"Broker Management Interface (BMMI)","text":"

    Broker SHOULD implement Management interface as a service endpoint for clients to interact with.

    Broker should use mmi.service.broker service endpoint to listen to clients' requests.

    These MMI functions SHOULD be implemented:

    • show_broker - to return broker status and statistics
    • show_workers - to return worker status and statistics
    • show_clients - to return clients statistics
    • show_services - to return services status and statistics
    • restart - restart broker
    • shutdown - shutdown broker completely
    • disconnect - to disconnect all workers
    "},{"location":"NFP/#worker-management-interface-wmmi","title":"Worker Management Interface (WMMI)","text":"

    Worker SHOULD implement Management interface as a service endpoint for clients to interact with.

    Worker should use mmi.service.worker service endpoint to listen to clients' requests.

    These MMI functions SHOULD be implemented:

    • show_broker - to return broker status and statistics
    • show_workers - to return worker status and statistics
    • show_clients - to return clients statistics
    • restart - restart worker
    • shutdown - shutdown worker completely
    • disconnect - to disconnect worker from broker and re-establish connection
    "},{"location":"NFP/#broker-simple-inventory-datastore-sid","title":"Broker Simple Inventory Datastore (SID)","text":"

    Broker should implement an Inventory Datastore to store and serve configuration to workers, as well as arbitrary worker inventory data.

    Broker should use sid.service.broker service endpoint to listen to workers' requests.

    Workers willing to make use of the broker's inventory datastore should implement the NFP/Client protocol defined above to request inventory data.

    These SID functions SHOULD be implemented:

    • get_inventory - to return inventory content for given worker
    "},{"location":"NFP/#sid-implementation","title":"SID Implementation","text":"

    TBD

    "},{"location":"NFP/#broker-file-sharing-service-fss","title":"Broker File Sharing Service (FSS)","text":"

    The broker implements a service to serve files to clients and workers from the local file system using the nf://<filepath> URL scheme for supported arguments.

    Broker should use fss.service.broker service endpoint to listen to workers' requests.

    "},{"location":"NFP/#fss-implementation","title":"FSS Implementation","text":"

    TBD

    "},{"location":"NFP/#reliability","title":"Reliability","text":"

    The NORFAB pattern is designed to extend the basic \u00d8MQ request-reply pattern with the ability to detect and recover from a specific set of failures:

    • Worker applications which crash, run too slowly, or freeze.
    • Worker applications that are disconnected from the network (temporarily or permanently).
    • Client applications that are temporarily disconnected from the network.
    • A queue broker that crashes and is restarted.
    • A queue broker that suffers a permanent failure.
    • Requests or replies that are lost due to any of these failures.
    The general approach is to retry and reconnect, using heartbeating when needed.
    "},{"location":"NFP/#scalability-and-performance","title":"Scalability and Performance","text":"

    NORFAB is designed to scale to large numbers (thousands) of workers and clients, allowing tens of thousands of resource entities to be managed, limited only by system resources on the broker. Partitioning of workers by service allows multiple applications to share the same broker infrastructure. Workers manage a set of resources defined by the system administrator. The same resource can be managed by a single worker or by multiple workers; the system imposes no restrictions on how resource entities are distributed across workers.

    Throughput performance for a single client application will be limited to tens of thousands, not millions, of request-reply transactions per second due to round-trip costs and the extra latency of a broker-based approach. The larger the request and reply messages, the more efficient NORFAB will become.

    System requirements for the broker are moderate: no more than one outstanding request per client will be queued, and message contents can be switched between clients and workers without copying or processing. A single broker thread can therefore switch several million messages per second.

    "},{"location":"NFP/#security","title":"Security","text":""},{"location":"NFP/#worker-authentication","title":"Worker Authentication","text":"

    TBD

    "},{"location":"NFP/#worker-authorization","title":"Worker Authorization","text":"

    TBD

    "},{"location":"NFP/#client-authentication","title":"Client Authentication","text":"

    TBD

    "},{"location":"NFP/#client-authorization-role-based-access-control-rbac","title":"Client Authorization - Role Based Access Control (RBAC)","text":"

    TBD

    "},{"location":"NFP/#client-encryption","title":"Client Encryption","text":"

    TBD

    "},{"location":"NFP/#worker-encryption","title":"Worker Encryption","text":"

    TBD

    "},{"location":"NFP/#accounting","title":"Accounting","text":"

    TBD

    "},{"location":"NFP/#known-weaknesses","title":"Known Weaknesses","text":"
    • The heartbeat rate must be set to similar values in broker and worker, or false disconnections will occur.
    • The use of multiple frames for command formatting has a performance impact.
    "},{"location":"netbox_worker_api_reference/","title":"Netbox Worker","text":""},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker--netbox-worker-inventory-reference","title":"Netbox Worker Inventory Reference","text":""},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker--sample-netbox-worker-inventory","title":"Sample Netbox Worker Inventory","text":"
    service: netbox\nbroker_endpoint: \"tcp://127.0.0.1:5555\"\ninstances:\n  prod:\n    default: True\n    url: \"http://192.168.4.130:8000/\"\n    token: \"0123456789abcdef0123456789abcdef01234567\"\n    ssl_verify: False\n  dev:\n    url: \"http://192.168.4.131:8000/\"\n    token: \"0123456789abcdef0123456789abcdef01234567\"\n    ssl_verify: False\n  preprod:\n    url: \"http://192.168.4.132:8000/\"\n    token: \"0123456789abcdef0123456789abcdef01234567\"\n    ssl_verify: False\n
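    A small sketch of loading the sample inventory above with PyYAML and selecting the default instance, mirroring the NetboxWorker initialization logic shown further below; the file name is illustrative:

    import yaml

    with open("netbox_worker_inventory.yaml") as f:
        inventory = yaml.safe_load(f)

    for name, params in inventory["instances"].items():
        if params.get("default") is True:
            default_instance = name
            break
    else:
        default_instance = name    # fall back to the last instance, as the worker does

    print(default_instance)        # "prod" for the sample inventory above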
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker--sample-nornir-worker-netbox-inventory","title":"Sample Nornir Worker Netbox Inventory","text":"
    netbox:\n  retry: 3\n  retry_interval: 1\n  instance: prod\n  interfaces:\n    ip_addresses: True\n    inventory_items: True\n  connections:\n    cables: True\n    circuits: True\n  nbdata: True\n  primary_ip: \"ipv4\"\n  devices:\n    - fceos4\n    - fceos5\n    - fceos8\n    - ceos1\n  filters: \n    - q: fceos3\n    - manufacturer: cisco\n      platform: cisco_xr\n
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker","title":"NetboxWorker(broker, service, worker_name, exit_event=None, init_done_event=None, log_level='WARNING')","text":"

    Bases: NFPWorker

    Parameters:

    • broker - broker URL to connect to. Required.
    • service - name of the service the worker belongs to. Required.
    • worker_name - name of this worker. Required.
    • exit_event - if set, worker needs to stop/exit. Default: None
    • init_done_event - event to set when worker is done initializing. Default: None
    • log_level - logging level of this worker. Default: 'WARNING'

    Source code in norfab\\workers\\netbox_worker.py
    def __init__(\n    self,\n    broker,\n    service,\n    worker_name,\n    exit_event=None,\n    init_done_event=None,\n    log_level=\"WARNING\",\n):\n    super().__init__(broker, service, worker_name, exit_event, log_level)\n    self.init_done_event = init_done_event\n\n    # get inventory from broker\n    self.inventory = self.load_inventory()\n    if not self.inventory:\n        log.critical(\n            f\"{self.name} - Broker {self.broker} returned no inventory for {self.name}, killing myself...\"\n        )\n        self.destroy()\n\n    assert self.inventory.get(\n        \"instances\"\n    ), f\"{self.name} - inventory has no Netbox instances\"\n\n    # find default instance\n    for name, params in self.inventory[\"instances\"].items():\n        if params.get(\"default\") is True:\n            self.default_instance = name\n            break\n    else:\n        self.default_instance = name\n\n    # check Netbox compatibility\n    self._verify_compatibility()\n\n    self.init_done_event.set()\n    log.info(f\"{self.name} - Started\")\n
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.graphql","title":"graphql(instance=None, dry_run=False, obj=None, filters=None, fields=None, queries=None, query_string=None)","text":"

    Function to query Netbox v4 GraphQL API

    Parameters:

    • instance (str) - Netbox instance name. Default: None
    • dry_run (bool) - only return query content, do not run it. Default: False

    Source code in norfab\\workers\\netbox_worker.py
    def graphql(\n    self,\n    instance: str = None,\n    dry_run: bool = False,\n    obj: dict = None,\n    filters: dict = None,\n    fields: list = None,\n    queries: dict = None,\n    query_string: str = None,\n) -> Result:\n    \"\"\"\n    Function to query Netbox v4 GraphQL API\n\n    :param instance: Netbox instance name\n    :param dry_run: only return query content, do not run it\n    \"\"\"\n    nb_params = self._get_instance_params(instance)\n    ret = Result(task=f\"{self.name}:graphql\")\n\n    # form graphql query(ies) payload\n    if queries:\n        queries_list = []\n        for alias, query_data in queries.items():\n            query_data[\"alias\"] = alias\n            if self.nb_version[0] == 4:\n                queries_list.append(_form_query_v4(**query_data))\n            elif self.nb_version[0] == 3:\n                queries_list.append(_form_query_v3(**query_data))\n        queries_strings = \"    \".join(queries_list)\n        query = f\"query {{{queries_strings}}}\"\n    elif obj and filters and fields:\n        if self.nb_version[0] == 4:\n            query = _form_query_v4(obj, filters, fields)\n        elif self.nb_version[0] == 3:\n            query = _form_query_v3(obj, filters, fields)\n        query = f\"query {{{query}}}\"\n    elif query_string:\n        query = query_string\n    else:\n        raise RuntimeError(\n            f\"{self.name} - graphql method expects quieries argument or obj, filters, \"\n            f\"fields arguments or query_string argument provided\"\n        )\n    payload = json.dumps({\"query\": query})\n\n    # form and return dry run response\n    if dry_run:\n        ret.result = {\n            \"url\": f\"{nb_params['url']}/graphql/\",\n            \"data\": payload,\n            \"verify\": nb_params.get(\"ssl_verify\", True),\n            \"headers\": {\n                \"Content-Type\": \"application/json\",\n                \"Accept\": \"application/json\",\n                \"Authorization\": f\"Token ...{nb_params['token'][-6:]}\",\n            },\n        }\n        return ret\n\n    # send request to Netbox GraphQL API\n    log.debug(\n        f\"{self.name} - sending GraphQL query '{payload}' to URL '{nb_params['url']}/graphql/'\"\n    )\n    req = requests.post(\n        url=f\"{nb_params['url']}/graphql/\",\n        headers={\n            \"Content-Type\": \"application/json\",\n            \"Accept\": \"application/json\",\n            \"Authorization\": f\"Token {nb_params['token']}\",\n        },\n        data=payload,\n        verify=nb_params.get(\"ssl_verify\", True),\n        timeout=(3, 600),\n    )\n    try:\n        req.raise_for_status()\n    except Exception as e:\n        raise Exception(\n            f\"{self.name} -  Netbox GraphQL query failed, query '{query}', \"\n            f\"URL '{req.url}', status-code '{req.status_code}', reason '{req.reason}', \"\n            f\"response content '{req.text}'\"\n        )\n\n    # return results\n    reply = req.json()\n    if reply.get(\"errors\"):\n        msg = f\"{self.name} - GrapQL query error '{reply['errors']}', query '{payload}'\"\n        log.error(msg)\n        ret.errors.append(msg)\n        if reply.get(\"data\"):\n            ret.result = reply[\"data\"]  # at least return some data\n    elif queries or query_string:\n        ret.result = reply[\"data\"]\n    else:\n        ret.result = reply[\"data\"][obj]\n\n    return ret\n
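    For reference, a standalone sketch of the HTTP request that graphql() builds, based on the source above; the URL and token are taken from the sample inventory and the query itself is illustrative:

    import json
    import requests

    url = "http://192.168.4.130:8000"
    token = "0123456789abcdef0123456789abcdef01234567"
    query = "query {device_list {name}}"

    response = requests.post(
        url=f"{url}/graphql/",
        headers={
            "Content-Type": "application/json",
            "Accept": "application/json",
            "Authorization": f"Token {token}",
        },
        data=json.dumps({"query": query}),
        verify=False,          # ssl_verify is False in the sample inventory
        timeout=(3, 600),
    )
    response.raise_for_status()
    print(response.json()["data"])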
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.rest","title":"rest(instance=None, method='get', api='', **kwargs)","text":"

    Method to query Netbox REST API.

    Parameters:

    • instance (str) - Netbox instance name. Default: None
    • method (str) - requests method name e.g. get, post, put etc. Default: 'get'
    • api (str) - api url to query e.g. "extras" or "dcim/interfaces" etc. Default: ''
    • kwargs - any additional requests method's arguments. Default: {}

    Source code in norfab\\workers\\netbox_worker.py
    def rest(\n    self, instance: str = None, method: str = \"get\", api: str = \"\", **kwargs\n) -> dict:\n    \"\"\"\n    Method to query Netbox REST API.\n\n    :param instance: Netbox instance name\n    :param method: requests method name e.g. get, post, put etc.\n    :param api: api url to query e.g. \"extras\" or \"dcim/interfaces\" etc.\n    :param kwargs: any additional requests method's arguments\n    \"\"\"\n    params = self._get_instance_params(instance)\n\n    # send request to Netbox REST API\n    response = getattr(requests, method)(\n        url=f\"{params['url']}/api/{api}/\",\n        headers={\n            \"Content-Type\": \"application/json\",\n            \"Accept\": \"application/json\",\n            \"Authorization\": f\"Token {params['token']}\",\n        },\n        verify=params.get(\"ssl_verify\", True),\n        **kwargs,\n    )\n\n    response.raise_for_status()\n\n    return response.json()\n
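    A hypothetical usage sketch; "worker" stands for an initialized NetboxWorker, the api path and query parameters are illustrative, and extra kwargs such as params are passed straight through to the requests call as in the source above:

    interfaces = worker.rest(
        instance="prod",
        method="get",
        api="dcim/interfaces",
        params={"device": "ceos1"},   # forwarded to requests.get as query parameters
    )
    print(interfaces["count"])        # Netbox list endpoints return paginated results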
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.get_devices","title":"get_devices(filters=None, instance=None, dry_run=False, devices=None)","text":"

    Function to retrieve devices data from Netbox using GraphQL API.

    Parameters:

    • filters (list) - list of filters dictionaries to filter devices. Default: None
    • instance (str) - Netbox instance name. Default: None
    • dry_run (bool) - only return query content, do not run it. Default: False
    • devices (list) - list of device names to query data for. Default: None

    Returns:

    • Result - dictionary keyed by device name with device data

    Source code in norfab\\workers\\netbox_worker.py
    def get_devices(\n    self,\n    filters: list = None,\n    instance: str = None,\n    dry_run: bool = False,\n    devices: list = None,\n) -> Result:\n    \"\"\"\n    Function to retrieve devices data from Netbox using GraphQL API.\n\n    :param filters: list of filters dictionaries to filter devices\n    :param instance: Netbox instance name\n    :param dry_run: only return query content, do not run it\n    :param devices: list of device names to query data for\n    :return: dictionary keyed by device name with device data\n    \"\"\"\n    ret = Result(task=f\"{self.name}:get_devices\", result={})\n    instance = instance or self.default_instance\n    filters = filters or []\n\n    device_fields = [\n        \"name\",\n        \"last_updated\",\n        \"custom_field_data\",\n        \"tags {name}\",\n        \"device_type {model}\",\n        \"role {name}\",\n        \"config_context\",\n        \"tenant {name}\",\n        \"platform {name}\",\n        \"serial\",\n        \"asset_tag\",\n        \"site {name tags{name}}\",\n        \"location {name}\",\n        \"rack {name}\",\n        \"status\",\n        \"primary_ip4 {address}\",\n        \"primary_ip6 {address}\",\n        \"airflow\",\n        \"position\",\n    ]\n\n    # form queries dictionary out of filters\n    queries = {\n        f\"devices_by_filter_{index}\": {\n            \"obj\": \"device_list\",\n            \"filters\": filter_item,\n            \"fields\": device_fields,\n        }\n        for index, filter_item in enumerate(filters)\n    }\n\n    # add devices list query\n    if devices:\n        if self.nb_version[0] == 4:\n            dlist = '[\"{dl}\"]'.format(dl='\", \"'.join(devices))\n            filters_dict = {\"name\": f\"{{in_list: {dlist}}}\"}\n        elif self.nb_version[0] == 3:\n            filters_dict = {\"name\": devices}\n        queries[\"devices_by_devices_list\"] = {\n            \"obj\": \"device_list\",\n            \"filters\": filters_dict,\n            \"fields\": device_fields,\n        }\n\n    # send queries\n    query_result = self.graphql(queries=queries, instance=instance, dry_run=dry_run)\n    devices_data = query_result.result\n\n    # return dry run result\n    if dry_run:\n        return query_result\n\n    # check for errors\n    if query_result.errors:\n        msg = f\"{self.name} - get devices query failed with errors:\\n{query_result.errors}\"\n        raise Exception(msg)\n\n    # process devices\n    for devices_list in devices_data.values():\n        for device in devices_list:\n            if device[\"name\"] not in ret.result:\n                ret.result[device.pop(\"name\")] = device\n\n    return ret\n
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.get_interfaces","title":"get_interfaces(instance=None, devices=None, ip_addresses=False, inventory_items=False, dry_run=False)","text":"

    Function to retrieve device interfaces from Netbox using GraphQL API.

    Parameters:

    • instance (str) - Netbox instance name. Default: None
    • devices (list) - list of devices to retrieve interfaces for. Default: None
    • ip_addresses (bool) - if True, retrieves interface IPs. Default: False
    • inventory_items (bool) - if True, retrieves interface inventory items. Default: False
    • dry_run (bool) - only return query content, do not run it. Default: False

    Returns:

    • Result - dictionary keyed by device name with interface details

    Source code in norfab\\workers\\netbox_worker.py
    def get_interfaces(\n    self,\n    instance: str = None,\n    devices: list = None,\n    ip_addresses: bool = False,\n    inventory_items: bool = False,\n    dry_run: bool = False,\n) -> Result:\n    \"\"\"\n    Function to retrieve device interfaces from Netbox using GraphQL API.\n\n    :param instance: Netbox instance name\n    :param devices: list of devices to retrieve interfaces for\n    :param ip_addresses: if True, retrieves interface IPs\n    :param inventory_items: if True, retrieves interface inventory items\n    :param dry_run: only return query content, do not run it\n    :return: dictionary keyed by device name with interface details\n    \"\"\"\n    # form final result object\n    ret = Result(\n        task=f\"{self.name}:get_interfaces\", result={d: {} for d in devices}\n    )\n    intf_fields = [\n        \"name\",\n        \"enabled\",\n        \"description\",\n        \"mtu\",\n        \"parent {name}\",\n        \"mac_address\",\n        \"mode\",\n        \"untagged_vlan {vid name}\",\n        \"vrf {name}\",\n        \"tagged_vlans {vid name}\",\n        \"tags {name}\",\n        \"custom_fields\",\n        \"last_updated\",\n        \"bridge {name}\",\n        \"child_interfaces {name}\",\n        \"bridge_interfaces {name}\",\n        \"member_interfaces {name}\",\n        \"wwn\",\n        \"duplex\",\n        \"speed\",\n        \"id\",\n        \"device {name}\",\n    ]\n\n    # add IP addresses to interfaces fields\n    if ip_addresses:\n        intf_fields.append(\n            \"ip_addresses {address status role dns_name description custom_fields last_updated tenant {name} tags {name}}\"\n        )\n\n    # form interfaces query dictionary\n    queries = {\n        \"interfaces\": {\n            \"obj\": \"interface_list\",\n            \"filters\": {\"device\": devices},\n            \"fields\": intf_fields,\n        }\n    }\n\n    # add query to retrieve inventory items\n    if inventory_items:\n        inv_filters = {\"device\": devices, \"component_type\": \"dcim.interface\"}\n        inv_fields = [\n            \"name\",\n            \"component {... 
on InterfaceType {id}}\",\n            \"role {name}\",\n            \"manufacturer {name}\",\n            \"custom_fields\",\n            \"label\",\n            \"description\",\n            \"tags {name}\",\n            \"asset_tag\",\n            \"serial\",\n            \"part_id\",\n        ]\n        queries[\"inventor_items\"] = {\n            \"obj\": \"inventory_item_list\",\n            \"filters\": inv_filters,\n            \"fields\": inv_fields,\n        }\n\n    query_result = self.graphql(instance=instance, queries=queries, dry_run=dry_run)\n\n    # return dry run result\n    if dry_run:\n        return query_result\n\n    interfaces_data = query_result.result\n\n    # exit if no Interfaces returned\n    if not interfaces_data.get(\"interfaces\"):\n        raise Exception(\n            f\"{self.name} - no interfaces data in '{interfaces_data}' returned by '{instance}' \"\n            f\"for devices {', '.join(devices)}\"\n        )\n\n    # process query results\n    interfaces = interfaces_data.pop(\"interfaces\")\n\n    # process inventory items\n    if inventory_items:\n        inventory_items_list = interfaces_data.pop(\"inventor_items\")\n        # transform inventory items list to a dictionary keyed by intf_id\n        inventory_items_dict = {}\n        while inventory_items_list:\n            inv_item = inventory_items_list.pop()\n            # skip inventory items that does not assigned to components\n            if inv_item.get(\"component\") is None:\n                continue\n            intf_id = str(inv_item.pop(\"component\").pop(\"id\"))\n            inventory_items_dict.setdefault(intf_id, [])\n            inventory_items_dict[intf_id].append(inv_item)\n        # iterate over interfaces and add inventory items\n        for intf in interfaces:\n            intf[\"inventory_items\"] = inventory_items_dict.pop(intf[\"id\"], [])\n\n    # transform interfaces list to dictionary keyed by device and interfaces names\n    while interfaces:\n        intf = interfaces.pop()\n        _ = intf.pop(\"id\")\n        device_name = intf.pop(\"device\").pop(\"name\")\n        intf_name = intf.pop(\"name\")\n        if device_name in ret.result:  # Netbox issue #16299\n            ret.result[device_name][intf_name] = intf\n\n    return ret\n
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.get_connections","title":"get_connections(devices, instance=None, dry_run=False, cables=False, circuits=False)","text":"

    Function to retrieve device connections data from Netbox using GraphQL API.

    Parameters:

    • instance (str) - Netbox instance name. Default: None
    • devices (list) - list of devices to retrieve interfaces for. Required.
    • dry_run (bool) - only return query content, do not run it. Default: False
    • cables (bool) - if True, includes interfaces' directly attached cables details. Default: False
    • circuits (bool) - if True, includes interfaces' circuits termination details. Default: False

    Returns:

    • Result - dictionary keyed by device name with connections data

    Source code in norfab\\workers\\netbox_worker.py
    def get_connections(\n    self,\n    devices: list,\n    instance: str = None,\n    dry_run: bool = False,\n    cables: bool = False,\n    circuits: bool = False,\n) -> Result:\n    \"\"\"\n    Function to retrieve device connections data from Netbox using GraphQL API.\n\n    :param instance: Netbox instance name\n    :param devices: list of devices to retrieve interface for\n    :param dry_run: only return query content, do not run it\n    :param cables: if True includes interfaces' directly attached cables details\n    :param circuits: if True includes interfaces' circuits termination details\n    :return: dictionary keyed by device name with connections data\n    \"\"\"\n    # form final result dictionary\n    ret = Result(\n        task=f\"{self.name}:get_connections\", result={d: {} for d in devices}\n    )\n\n    # form lists of fields to request from netbox\n    cable_fields = \"\"\"\n        cable {\n            type\n            status\n            tenant {name}\n            label\n            tags {name}\n            length\n            length_unit\n            custom_fields\n        }\n    \"\"\"\n    if self.nb_version[0] == 4:\n        interfaces_fields = [\n            \"name\",\n            \"device {name}\",\n            \"\"\"connected_endpoints {\n            __typename \n            ... on InterfaceType {name device {name}}\n            ... on ProviderNetworkType {name}\n            }\"\"\",\n        ]\n    elif self.nb_version[0] == 3:\n        interfaces_fields = [\n            \"name\",\n            \"device {name}\",\n            \"\"\"connected_endpoints {\n            __typename \n            ... on InterfaceType {name device {name}}\n            }\"\"\",\n        ]\n    console_ports_fields = [\n        \"name\",\n        \"device {name}\",\n        \"\"\"connected_endpoints {\n          __typename \n          ... on ConsoleServerPortType {name device {name}}\n        }\"\"\",\n        \"\"\"link_peers {\n          __typename\n          ... on ConsoleServerPortType {name device {name}}\n          ... on FrontPortType {name device {name}}\n          ... on RearPortType {name device {name}}\n        }\"\"\",\n    ]\n    console_server_ports_fields = [\n        \"name\",\n        \"device {name}\",\n        \"\"\"connected_endpoints {\n          __typename \n          ... on ConsolePortType {name device {name}}\n        }\"\"\",\n        \"\"\"link_peers {\n          __typename\n          ... on ConsolePortType {name device {name}}\n          ... on FrontPortType {name device {name}}\n          ... on RearPortType {name device {name}}\n        }\"\"\",\n    ]\n\n    # add circuits info\n    if circuits is True:\n        interfaces_fields.append(\n            \"\"\"\n            link_peers {\n                __typename\n                ... on InterfaceType {name device {name}}\n                ... on FrontPortType {name device {name}}\n                ... on RearPortType {name device {name}}\n                ... on CircuitTerminationType {\n                    circuit{\n                        cid \n                        description \n                        tags{name} \n                        provider{name} \n                        status\n                        custom_fields\n                        commit_rate\n                    }\n                }\n            }\n        \"\"\"\n        )\n    else:\n        interfaces_fields.append(\n            \"\"\"\n            link_peers {\n                __typename\n                ... 
on InterfaceType {name device {name}}\n                ... on FrontPortType {name device {name}}\n                ... on RearPortType {name device {name}}\n            }\n        \"\"\"\n        )\n\n    # check if need to include cables info\n    if cables is True:\n        interfaces_fields.append(cable_fields)\n        console_ports_fields.append(cable_fields)\n        console_server_ports_fields.append(cable_fields)\n\n    # form query dictionary with aliases to get data from Netbox\n    queries = {\n        \"interface\": {\n            \"obj\": \"interface_list\",\n            \"filters\": {\"device\": devices},\n            \"fields\": interfaces_fields,\n        },\n        \"consoleport\": {\n            \"obj\": \"console_port_list\",\n            \"filters\": {\"device\": devices},\n            \"fields\": console_ports_fields,\n        },\n        \"consoleserverport\": {\n            \"obj\": \"console_server_port_list\",\n            \"filters\": {\"device\": devices},\n            \"fields\": console_server_ports_fields,\n        },\n    }\n\n    # retrieve full list of devices interface with all cables\n    query_result = self.graphql(queries=queries, instance=instance, dry_run=dry_run)\n\n    # return dry run result\n    if dry_run:\n        return query_result\n\n    all_ports = query_result.result\n\n    # extract interfaces\n    for port_type, ports in all_ports.items():\n        for port in ports:\n            endpoints = port[\"connected_endpoints\"]\n            # skip ports that have no remote device connected\n            if not endpoints or not all(i for i in endpoints):\n                continue\n\n            # extract required parameters\n            cable = port.get(\"cable\", {})\n            device_name = port[\"device\"][\"name\"]\n            port_name = port[\"name\"]\n            link_peers = port[\"link_peers\"]\n            remote_termination_type = endpoints[0][\"__typename\"].lower()\n            remote_termination_type = remote_termination_type.replace(\"type\", \"\")\n\n            # form initial connection dictionary\n            connection = {\n                \"breakout\": len(endpoints) > 1,\n                \"remote_termination_type\": remote_termination_type,\n                \"termination_type\": port_type,\n            }\n\n            # add remote connection details\n            if remote_termination_type == \"providernetwork\":\n                connection[\"remote_device\"] = None\n                connection[\"remote_interface\"] = None\n                connection[\"provider\"] = endpoints[0][\"name\"]\n            else:\n                remote_interface = endpoints[0][\"name\"]\n                if len(endpoints) > 1:\n                    remote_interface = [i[\"name\"] for i in endpoints]\n                connection[\"remote_interface\"] = remote_interface\n                connection[\"remote_device\"] = endpoints[0][\"device\"][\"name\"]\n\n            # handle circuits\n            if (\n                circuits and \"circuit\" in link_peers[0]\n            ):  # add circuit connection details\n                connection[\"circuit\"] = link_peers[0][\"circuit\"]\n\n            # add cable and its peer details\n            if cables:\n                peer_termination_type = link_peers[0][\"__typename\"].lower()\n                peer_termination_type = peer_termination_type.replace(\"type\", \"\")\n                cable[\"peer_termination_type\"] = peer_termination_type\n                cable[\"peer_device\"] = 
link_peers[0].get(\"device\", {}).get(\"name\")\n                cable[\"peer_interface\"] = link_peers[0].get(\"name\")\n                if len(link_peers) > 1:  # handle breakout cable\n                    cable[\"peer_interface\"] = [i[\"name\"] for i in link_peers]\n                connection[\"cable\"] = cable\n\n            ret.result[device_name][port_name] = connection\n\n    return ret\n
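    A minimal usage sketch for get_connections based on the signature above. The worker variable stands for an already initialized NetboxWorker instance and the device names are placeholders; only the method arguments and the shape of the returned Result follow the reference.

        worker = ...  # hypothetical, already initialized NetboxWorker instance
        connections = worker.get_connections(
            devices=["switch-1", "switch-2"],  # placeholder device names
            cables=True,      # include details of directly attached cables
            circuits=True,    # include circuit termination details
        )
        # Result.result is keyed by device name, then by port name
        for device, ports in connections.result.items():
            for port, conn in ports.items():
                print(device, port, conn["remote_termination_type"], conn.get("remote_device"))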
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.get_circuits","title":"get_circuits(devices, instance=None, dry_run=False)","text":"

    Function to retrieve device circuits data from Netbox using GraphQL API.

    Parameters:

    • devices (list) - list of devices to retrieve circuits for. Required.
    • instance (str) - Netbox instance name. Default: None.
    • dry_run (bool) - only return query content, do not run it. Default: False.

    Returns:

    • dictionary keyed by device name with circuits data

    Source code in norfab\\workers\\netbox_worker.py
    def get_circuits(\n    self,\n    devices: list,\n    instance: str = None,\n    dry_run: bool = False,\n):\n    \"\"\"\n    Function to retrieve device circuits data from Netbox using GraphQL API.\n\n    :param devices: list of devices to retrieve interface for\n    :param instance: Netbox instance name\n    :param dry_run: only return query content, do not run it\n    :return: dictionary keyed by device name with circuits data\n    \"\"\"\n    # form final result object\n    ret = Result(task=f\"{self.name}:get_circuits\", result={d: {} for d in devices})\n\n    device_sites_fields = [\"site {slug}\"]\n    circuit_fields = [\n        \"cid\",\n        \"tags {name}\",\n        \"provider {name}\",\n        \"commit_rate\",\n        \"description\",\n        \"status\",\n        \"type {name}\",\n        \"provider_account {name}\",\n        \"tenant {name}\",\n        \"termination_a {id}\",\n        \"termination_z {id}\",\n        \"custom_fields\",\n        \"comments\",\n    ]\n\n    # retrieve list of hosts' sites\n    if self.nb_version[0] == 4:\n        dlist = '[\"{dl}\"]'.format(dl='\", \"'.join(devices))\n        device_filters_dict = {\"name\": f\"{{in_list: {dlist}}}\"}\n    elif self.nb_version[0] == 3:\n        device_filters_dict = {\"name\": devices}\n    device_sites = self.graphql(\n        obj=\"device_list\",\n        filters=device_filters_dict,\n        fields=device_sites_fields,\n        instance=instance,\n    )\n    sites = list(set([i[\"site\"][\"slug\"] for i in device_sites.result]))\n\n    # retrieve all circuits for devices' sites\n    if self.nb_version[0] == 4:\n        circuits_filters_dict = {\"site\": sites}\n    elif self.nb_version[0] == 3:\n        circuits_filters_dict = {\"site\": sites}\n\n    query_result = self.graphql(\n        obj=\"circuit_list\",\n        filters=circuits_filters_dict,\n        fields=circuit_fields,\n        dry_run=dry_run,\n        instance=instance,\n    )\n\n    # return dry run result\n    if dry_run is True:\n        return query_result\n\n    all_circuits = query_result.result\n\n    # iterate over circuits and map them to devices\n    for circuit in all_circuits:\n        cid = circuit.pop(\"cid\")\n        circuit[\"tags\"] = [i[\"name\"] for i in circuit[\"tags\"]]\n        circuit[\"type\"] = circuit[\"type\"][\"name\"]\n        circuit[\"provider\"] = circuit[\"provider\"][\"name\"]\n        circuit[\"tenant\"] = circuit[\"tenant\"][\"name\"] if circuit[\"tenant\"] else None\n        circuit[\"provider_account\"] = (\n            circuit[\"provider_account\"][\"name\"]\n            if circuit[\"provider_account\"]\n            else None\n        )\n        termination_a = circuit.pop(\"termination_a\")\n        termination_z = circuit.pop(\"termination_z\")\n        termination_a = termination_a[\"id\"] if termination_a else None\n        termination_z = termination_z[\"id\"] if termination_z else None\n\n        # retrieve A or Z termination path using Netbox REST API\n        if termination_a is not None:\n            circuit_path = self.rest(\n                instance=instance,\n                method=\"get\",\n                api=f\"/circuits/circuit-terminations/{termination_a}/paths/\",\n            )\n        elif termination_z is not None:\n            circuit_path = self.rest(\n                instance=instance,\n                method=\"get\",\n                api=f\"/circuits/circuit-terminations/{termination_z}/paths/\",\n            )\n        else:\n            continue\n\n        # check if 
circuit ends connect to device or provider network\n        if (\n            not circuit_path\n            or \"name\" not in circuit_path[0][\"path\"][0][0]\n            or \"name\" not in circuit_path[0][\"path\"][-1][-1]\n        ):\n            continue\n\n        # form A and Z connection endpoints\n        end_a = {\n            \"device\": circuit_path[0][\"path\"][0][0]\n            .get(\"device\", {})\n            .get(\"name\", False),\n            \"provider_network\": \"provider-network\"\n            in circuit_path[0][\"path\"][0][0][\"url\"],\n            \"name\": circuit_path[0][\"path\"][0][0][\"name\"],\n        }\n        end_z = {\n            \"device\": circuit_path[0][\"path\"][-1][-1]\n            .get(\"device\", {})\n            .get(\"name\", False),\n            \"provider_network\": \"provider-network\"\n            in circuit_path[0][\"path\"][-1][-1][\"url\"],\n            \"name\": circuit_path[0][\"path\"][-1][-1][\"name\"],\n        }\n        circuit[\"is_active\"] = circuit_path[0][\"is_active\"]\n\n        # map path ends to devices\n        if end_a[\"device\"] and end_a[\"device\"] in devices:\n            ret.result[end_a[\"device\"]][cid] = copy.deepcopy(circuit)\n            ret.result[end_a[\"device\"]][cid][\"interface\"] = end_a[\"name\"]\n            if end_z[\"device\"]:\n                ret.result[end_a[\"device\"]][cid][\"remote_device\"] = end_z[\"device\"]\n                ret.result[end_a[\"device\"]][cid][\"remote_interface\"] = end_z[\"name\"]\n            elif end_z[\"provider_network\"]:\n                ret.result[end_a[\"device\"]][cid][\"provider_network\"] = end_z[\"name\"]\n        if end_z[\"device\"] and end_z[\"device\"] in devices:\n            ret.result[end_z[\"device\"]][cid] = copy.deepcopy(circuit)\n            ret.result[end_z[\"device\"]][cid][\"interface\"] = end_z[\"name\"]\n            if end_a[\"device\"]:\n                ret.result[end_z[\"device\"]][cid][\"remote_device\"] = end_a[\"device\"]\n                ret.result[end_z[\"device\"]][cid][\"remote_interface\"] = end_a[\"name\"]\n            elif end_a[\"provider_network\"]:\n                ret.result[end_z[\"device\"]][cid][\"provider_network\"] = end_a[\"name\"]\n\n    return ret\n
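    A minimal usage sketch for get_circuits. The worker variable stands for an already initialized NetboxWorker instance and the device names are placeholders; the returned structure follows the source above.

        worker = ...  # hypothetical, already initialized NetboxWorker instance
        circuits = worker.get_circuits(devices=["router-1", "router-2"])  # placeholder names
        # Result.result maps device name -> circuit ID -> circuit details
        for device, device_circuits in circuits.result.items():
            for cid, circuit in device_circuits.items():
                print(device, cid, circuit["provider"], circuit.get("remote_device"))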
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.get_nornir_inventory","title":"get_nornir_inventory(filters=None, devices=None, instance=None, interfaces=False, connections=False, circuits=False, nbdata=False, primary_ip='ip4')","text":"

    Method to query Netbox and return devices data in Nornir inventory format.

    Source code in norfab\\workers\\netbox_worker.py
    def get_nornir_inventory(\n    self,\n    filters: list = None,\n    devices: list = None,\n    instance: str = None,\n    interfaces: Union[dict, bool] = False,\n    connections: Union[dict, bool] = False,\n    circuits: Union[dict, bool] = False,\n    nbdata: bool = False,\n    primary_ip: str = \"ip4\",\n) -> Result:\n    \"\"\"\n    Method to query Netbox and return devices data in Nornir inventory format.\n    \"\"\"\n    hosts = {}\n    inventory = {\"hosts\": hosts}\n    ret = Result(task=f\"{self.name}:get_nornir_inventory\", result=inventory)\n\n    # check Netbox status\n    netbox_status = self.get_netbox_status(instance=instance)\n    if netbox_status.result[instance or self.default_instance][\"status\"] is False:\n        return ret\n\n    # retrieve devices data\n    nb_devices = self.get_devices(\n        filters=filters, devices=devices, instance=instance\n    )\n\n    # form Nornir hosts inventory\n    for device_name, device in nb_devices.result.items():\n        host = device[\"config_context\"].pop(\"nornir\", {})\n        host.setdefault(\"data\", {})\n        name = host.pop(\"name\", device_name)\n        hosts[name] = host\n        # add platform if not provided in device config context\n        if not host.get(\"platform\"):\n            if device[\"platform\"]:\n                host[\"platform\"] = device[\"platform\"][\"name\"]\n            else:\n                log.warning(f\"{self.name} - no platform found for '{name}' device\")\n        # add hostname if not provided in config context\n        if not host.get(\"hostname\"):\n            if device[\"primary_ip4\"] and primary_ip in [\"ip4\", \"ipv4\"]:\n                host[\"hostname\"] = device[\"primary_ip4\"][\"address\"].split(\"/\")[0]\n            elif device[\"primary_ip6\"] and primary_ip in [\"ip6\", \"ipv6\"]:\n                host[\"hostname\"] = device[\"primary_ip6\"][\"address\"].split(\"/\")[0]\n            else:\n                host[\"hostname\"] = name\n        # add netbox data to host's data\n        if nbdata is True:\n            host[\"data\"].update(device)\n\n    # add interfaces data\n    if interfaces:\n        # decide on get_interfaces arguments\n        kwargs = interfaces if isinstance(interfaces, dict) else {}\n        # add 'interfaces' key to all hosts' data\n        for host in hosts.values():\n            host[\"data\"].setdefault(\"interfaces\", {})\n        # query interfaces data from netbox\n        nb_interfaces = self.get_interfaces(\n            devices=list(hosts), instance=instance, **kwargs\n        )\n        # save interfaces data to hosts' inventory\n        while nb_interfaces.result:\n            device, device_interfaces = nb_interfaces.result.popitem()\n            hosts[device][\"data\"][\"interfaces\"] = device_interfaces\n\n    # add connections data\n    if connections:\n        # decide on get_interfaces arguments\n        kwargs = connections if isinstance(connections, dict) else {}\n        # add 'connections' key to all hosts' data\n        for host in hosts.values():\n            host[\"data\"].setdefault(\"connections\", {})\n        # query connections data from netbox\n        nb_connections = self.get_connections(\n            devices=list(hosts), instance=instance, **kwargs\n        )\n        # save connections data to hosts' inventory\n        while nb_connections.result:\n            device, device_connections = nb_connections.result.popitem()\n            hosts[device][\"data\"][\"connections\"] = device_connections\n\n    # add 
circuits data\n    if circuits:\n        # decide on get_interfaces arguments\n        kwargs = circuits if isinstance(circuits, dict) else {}\n        # add 'circuits' key to all hosts' data\n        for host in hosts.values():\n            host[\"data\"].setdefault(\"circuits\", {})\n        # query circuits data from netbox\n        nb_circuits = self.get_circuits(\n            devices=list(hosts), instance=instance, **kwargs\n        )\n        # save circuits data to hosts' inventory\n        while nb_circuits.result:\n            device, device_circuits = nb_circuits.result.popitem()\n            hosts[device][\"data\"][\"circuits\"] = device_circuits\n\n    return ret\n
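    A usage sketch for get_nornir_inventory. The worker variable stands for an already initialized NetboxWorker instance and the device names are placeholders; passing a dictionary for interfaces, connections or circuits forwards it as keyword arguments to the corresponding get_* method, as shown in the source above.

        worker = ...  # hypothetical, already initialized NetboxWorker instance
        inventory = worker.get_nornir_inventory(
            devices=["router-1", "router-2"],  # placeholder device names
            interfaces=True,                   # attach interfaces data to hosts' data
            connections={"cables": True},      # dict form is passed through to get_connections
        )
        # Result.result is {"hosts": {<host-name>: {"hostname": ..., "platform": ..., "data": {...}}}}
        for host_name, host in inventory.result["hosts"].items():
            print(host_name, host.get("hostname"), host.get("platform"))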
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.update_device_facts","title":"update_device_facts(instance=None, dry_run=False, via='nornir', timeout=60, **kwargs)","text":"

    Function to update device facts in Netbox using information provided by the NAPALM get_facts getter:

    • serial number
    • software version

    Parameters:

    • instance (str) - Netbox instance name. Default: None.
    • dry_run (bool) - return information that would be pushed to Netbox but do not push it. Default: False.
    • via (str) - service name to use to retrieve devices' data, default is the nornir service parse task. Default: 'nornir'.
    • timeout (int) - seconds to wait before timing out the data retrieval job. Default: 60.
    • kwargs - any additional arguments to send to the service for device data retrieval. Default: {}.

    {} Source code in norfab\workers\netbox_worker.py
    def update_device_facts(\n    self,\n    instance: str = None,\n    dry_run: bool = False,\n    via: str = \"nornir\",\n    timeout: int = 60,\n    **kwargs,\n):\n    \"\"\"\n    Function to update device facts in Netbox using information\n    provided by NAPALM get_facts getter:\n\n    - serial number\n    - software version\n    -\n\n    :param instance: Netbox instance name\n    :param dry_run: return information that would be pushed to Netbox but do not push it\n    :param via: service name to use to retrieve devices' data, default is nornir parse task\n    :param timeout: seconds to wait before timeout data retrieval job\n    :param kwargs: any additional arguments to send to service for device data retrieval\n    \"\"\"\n    result = {}\n    ret = Result(task=f\"{self.name}:push_device_facts\", result=result)\n    nb = self._get_pynetbox(instance)\n\n    if via == \"nornir\":\n        data = self.client.run_job(\n            \"nornir\",\n            \"parse\",\n            kwargs=kwargs,\n            workers=\"all\",\n            timeout=timeout,\n        )\n        for worker, results in data.items():\n            for host, host_data in results[\"result\"].items():\n                facts = host_data[\"napalm_get\"][\"get_facts\"]\n                nb_device = nb.dcim.devices.get(name=host)\n                if not nb_device:\n                    raise Exception(f\"'{host}' does not exist in Netbox\")\n                nb_device.serial = facts[\"serial_number\"]\n                if \"OS Version\" not in nb_device.comments:\n                    nb_device.comments += f\"\\nOS Version: {facts['os_version']}\"\n                nb_device.save()\n                result[host] = {\n                    \"update_device_facts\": {\n                        \"serial\": facts[\"serial_number\"],\n                        \"os_version\": facts[\"os_version\"],\n                    }\n                }\n    else:\n        raise UnsupportedServiceError(f\"'{via}' service not supported\")\n\n    return ret\n
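    A usage sketch for update_device_facts. The worker variable stands for an already initialized NetboxWorker instance; any extra keyword arguments would be forwarded to the nornir service parse task, per the source above.

        worker = ...  # hypothetical, already initialized NetboxWorker instance
        facts = worker.update_device_facts(
            via="nornir",   # retrieve data through the nornir service parse task
            timeout=120,    # seconds to wait for data retrieval
        )
        # Result.result is keyed by host name with the serial and os_version that were pushed
        print(facts.result)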
    "},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.update_device_facts--software-version","title":"software version","text":""},{"location":"netbox_worker_api_reference/#norfab.workers.netbox_worker.NetboxWorker.get_next_ip","title":"get_next_ip(prefix, description=None, device=None, interface=None, vrf=None, interface_create=True, secondary=False, tags=None, dns_name=None, tenant=None, comments=None, instance=None, dry_run=False)","text":"

    Method to retrieve an existing or allocate a new IP address in Netbox.

    Parameters:

    • prefix (str) - IPv4 or IPv6 prefix e.g. 10.0.0.0/24 or prefix description to allocate next available IP address from. Required.
    • description (str) - IP address description to record in Netbox database. Default: None.
    • device (str) - device name to find interface for and link IP address with. Default: None.
    • interface (str) - interface name to link IP address with, device attribute also must be provided. Default: None.

    None Source code in norfab\workers\netbox_worker.py
    def get_next_ip(\n    self,\n    prefix: str,\n    description: str = None,\n    device: str = None,\n    interface: str = None,\n    vrf: str = None,\n    interface_create: bool = True,\n    secondary: bool = False,\n    tags: list = None,\n    dns_name: str = None,\n    tenant: str = None,\n    comments: str = None,\n    instance: str = None,\n    dry_run: bool = False,\n):\n    \"\"\"\n    Method to retrieve existing or allocate new IP address in Netbox.\n\n    :param prefix: IPv4 or IPv6 prefix e.g. ``10.0.0.0/24`` or prefix description\n        to allocate next available IP Address from\n    :param description: IP address description to record in Netbox database\n    :param device: device name to find interface for and link IP address with\n    :param interface: interface name to link IP address with, ``device`` attribute\n        also must be provided\n\n    \"\"\"\n    print(f\"!!!!!!!!!!!! prefix {prefix}, description {description}\")\n    nb = self._get_pynetbox(instance)\n    nb_prefix = nb.ipam.prefixes.get(prefix=prefix, vrf=vrf)\n    nb_ip = nb_prefix.available_ips.create()\n    if description is not None:\n        nb_ip.description = description\n    nb_ip.save()\n\n    return Result(result=str(nb_ip))\n
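    A usage sketch for get_next_ip. The worker variable stands for an already initialized NetboxWorker instance; the prefix and description values are placeholders.

        worker = ...  # hypothetical, already initialized NetboxWorker instance
        next_ip = worker.get_next_ip(
            prefix="10.0.0.0/24",                   # prefix to allocate the next free IP from
            description="placeholder description",  # recorded against the IP in Netbox
        )
        # Result.result holds the allocated IP address as a string
        print(next_ip.result)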
    "},{"location":"nfcli_client_api_reference/","title":"NFCLI (PICLE Shell) Client","text":""},{"location":"nfcli_client_api_reference/#norfab.clients.picle_shell_client--picle-shell-client","title":"PICLE Shell CLient","text":"

    Client that implements an interactive shell to work with NorFab.

    "},{"location":"nfcli_client_api_reference/#norfab.clients.picle_shell_client.FileServiceCommands","title":"FileServiceCommands","text":"

    Bases: BaseModel

    "},{"location":"nfcli_client_api_reference/#norfab.clients.picle_shell_client.FileServiceCommands--sample-usage","title":"Sample Usage","text":""},{"location":"nfcli_client_api_reference/#norfab.clients.picle_shell_client.FileServiceCommands--copy","title":"copy","text":"

    Copy a file to the client's fetched files directory:

    file copy_ url nf://cli/commands.txt

    Copy a file to a destination relative to the current directory:

    file copy_ url nf://cli/commands.txt destination commands.txt

    "},{"location":"nfcli_client_api_reference/#norfab.clients.picle_shell_client.FileServiceCommands--list","title":"list","text":"

    List files at the broker root directory:

    file list
    file list url nf://

    List file details:

    file details\nfile details url nf://\n
    "},{"location":"nfcli_client_api_reference/#norfab.clients.picle_shell_client.NorFabShell","title":"NorFabShell","text":"

    Bases: BaseModel

    "},{"location":"nfcli_client_api_reference/#norfab.clients.picle_shell_client.NorFabShell.cmd_preloop_override","title":"cmd_preloop_override() classmethod","text":"

    This method is called before the CMD loop starts

    Source code in norfab\\clients\\picle_shell_client.py
    @classmethod\ndef cmd_preloop_override(self):\n    \"\"\"This method called before CMD loop starts\"\"\"\n    pass\n
    "},{"location":"norfab_architecture/","title":"NORFAB Architecture","text":""},{"location":"norfab_architecture/#high-level-design","title":"High Level Design","text":""},{"location":"norfab_architecture/#low-level-design","title":"Low Level Design","text":"

    Low-level design revolves around resource-oriented services: services that manage resources, where resources could be databases, network devices, file systems, etc.

    "},{"location":"norfab_architecture/#jobs-execution-flow","title":"Jobs Execution Flow","text":"

    There are multiple job flows implemented:

    • JOB POST FLOW - for clients to publish jobs to workers
    • JOB LOOP - job execution performed by workers
    • JOB GET FLOW - for clients to retrieve job execution results

    The flows above are depicted on the diagram; a client-side sketch of the POST and GET flows follows.
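    The sketch below illustrates the JOB POST and JOB GET flows from the client side, using the NFPClient API documented further below. The broker endpoint, the service name ("nornir") and the task name ("cli") with its commands argument are assumptions used only for illustration.

        from uuid import uuid4
        from norfab.core.client import NFPClient

        client = NFPClient("tcp://127.0.0.1:5555", name="demo-client")
        job_uuid = uuid4().hex

        # JOB POST FLOW - publish the job to the service workers via the broker
        post_reply = client.post(
            "nornir", "cli", kwargs={"commands": ["show clock"]}, uuid=job_uuid
        )

        # JOB GET FLOW - poll the broker for results of the same job uuid
        get_reply = client.get(
            "nornir", "cli", kwargs={"commands": ["show clock"]}, uuid=job_uuid
        )
        print(get_reply["status"], get_reply["results"])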

    "},{"location":"norfab_broker_reference/","title":"Broker","text":"

    Majordomo Protocol broker. A minimal implementation of http://rfc.zeromq.org/spec:7 and spec:8.

    Author: Min RK benjaminrk@gmail.com. Based on the Java example by Arkadiusz Orzechowski.

    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPService","title":"NFPService(name)","text":"

    Bases: object

    A single NFP Service

    Source code in norfab\\core\\broker.py
    def __init__(self, name: str):\n    self.name = name  # Service name\n    self.workers = []  # list of known workers\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPWorker","title":"NFPWorker(address, socket, socket_lock, multiplier, keepalive, service=None, log_level='WARNING')","text":"

    Bases: object

    An NFP Worker convenience class

    Source code in norfab\\core\\broker.py
    def __init__(\n    self,\n    address: str,\n    socket,\n    socket_lock,\n    multiplier: int,  # e.g. 6 times\n    keepalive: int,  # e.g. 5000 ms\n    service: NFPService = None,\n    log_level: str = \"WARNING\",\n):\n    self.address = address  # Address to route to\n    self.service = service\n    self.ready = False\n    self.socket = socket\n    self.exit_event = threading.Event()\n    self.keepalive = keepalive\n    self.multiplier = multiplier\n    self.socket_lock = socket_lock\n    self.log_level = log_level\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPWorker.is_ready","title":"is_ready()","text":"

    True if worker signaled W.READY

    Source code in norfab\\core\\broker.py
    def is_ready(self):\n    \"\"\"True if worker signaled W.READY\"\"\"\n    return self.service is not None and self.ready is True\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPWorker.destroy","title":"destroy(disconnect=False)","text":"

    Clean up routine

    Source code in norfab\\core\\broker.py
    def destroy(self, disconnect=False):\n    \"\"\"Clean up routine\"\"\"\n    self.exit_event.set()\n    self.keepaliver.stop()\n    self.service.workers.remove(self)\n\n    if disconnect is True:\n        msg = [self.address, b\"\", NFP.WORKER, self.service.name, NFP.DISCONNECT]\n        with self.socket_lock:\n            self.socket.send_multipart(msg)\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker","title":"NFPBroker(endpoint, exit_event, inventory, log_level='WARNING', multiplier=6, keepalive=2500, base_dir='')","text":"

    NORFAB Protocol broker

    Initialize broker state.

    Source code in norfab\\core\\broker.py
    def __init__(\n    self,\n    endpoint: str,\n    exit_event: Event,\n    inventory: NorFabInventory,\n    log_level: str = \"WARNING\",\n    multiplier: int = 6,\n    keepalive: int = 2500,\n    base_dir: str = \"\",\n):\n    \"\"\"Initialize broker state.\"\"\"\n    log.setLevel(log_level.upper())\n    self.log_level = log_level\n    self.keepalive = keepalive\n    self.multiplier = multiplier\n\n    self.services = {}\n    self.workers = {}\n    self.exit_event = exit_event\n    self.inventory = inventory\n\n    self.ctx = zmq.Context()\n    self.socket = self.ctx.socket(zmq.ROUTER)\n    self.socket.linger = 0\n    self.poller = zmq.Poller()\n    self.poller.register(self.socket, zmq.POLLIN)\n    self.socket.bind(endpoint)\n    self.socket_lock = (\n        threading.Lock()\n    )  # used for keepalives to protect socket object\n\n    self.base_dir = base_dir or os.getcwd()\n    os.makedirs(self.base_dir, exist_ok=True)\n\n    log.debug(f\"NFPBroker - is read and listening on {endpoint}\")\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.mediate","title":"mediate()","text":"

    Main broker work happens here

    Clients send messages of this frame format:

    Source code in norfab\\core\\broker.py
    def mediate(self):\n    \"\"\"\n    Main broker work happens here\n\n    Client send messages of this frame format:\n\n\n    \"\"\"\n    while True:\n        try:\n            items = self.poller.poll(self.keepalive)\n        except KeyboardInterrupt:\n            break  # Interrupted\n\n        if items:\n            msg = self.socket.recv_multipart()\n            log.debug(f\"NFPBroker - received '{msg}'\")\n\n            sender = msg.pop(0)\n            empty = msg.pop(0)\n            header = msg.pop(0)\n\n            if header == NFP.CLIENT:\n                self.process_client(sender, msg)\n            elif header == NFP.WORKER:\n                self.process_worker(sender, msg)\n\n        self.purge_workers()\n\n        # check if need to stop\n        if self.exit_event.is_set():\n            self.destroy()\n            break\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.destroy","title":"destroy()","text":"

    Disconnect all workers, destroy context.

    Source code in norfab\\core\\broker.py
    def destroy(self):\n    \"\"\"Disconnect all workers, destroy context.\"\"\"\n    log.info(f\"NFPBroker - interrupt received, killing broker\")\n    for name in list(self.workers.keys()):\n        # in case worker self destroyed while we iterating\n        if self.workers.get(name):\n            self.delete_worker(self.workers[name], True)\n    self.ctx.destroy(0)\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.delete_worker","title":"delete_worker(worker, disconnect)","text":"

    Deletes the worker from all data structures and destroys the worker object.

    Source code in norfab\\core\\broker.py
    def delete_worker(self, worker, disconnect):\n    \"\"\"Deletes worker from all data structures, and deletes worker.\"\"\"\n    worker.destroy(disconnect)\n    self.workers.pop(worker.address, None)\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.purge_workers","title":"purge_workers()","text":"

    Look for & delete expired workers.

    Source code in norfab\\core\\broker.py
    def purge_workers(self):\n    \"\"\"Look for & delete expired workers.\"\"\"\n    for name in list(self.workers.keys()):\n        # in case worker self destroyed while we iterating\n        if self.workers.get(name):\n            w = self.workers[name]\n        if not w.keepaliver.is_alive():\n            self.delete_worker(w, False)\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.send_to_worker","title":"send_to_worker(worker, command, sender, uuid, data)","text":"

    Send message to worker.

    Source code in norfab\\core\\broker.py
    def send_to_worker(\n    self, worker: NFPWorker, command: bytes, sender: bytes, uuid: bytes, data: bytes\n):\n    \"\"\"Send message to worker. If message is provided, sends that message.\"\"\"\n    # Stack routing and protocol envelopes to start of message\n    if command == NFP.POST:\n        msg = [worker.address, b\"\", NFP.WORKER, NFP.POST, sender, b\"\", uuid, data]\n    elif command == NFP.GET:\n        msg = [worker.address, b\"\", NFP.WORKER, NFP.GET, sender, b\"\", uuid, data]\n    else:\n        log.error(f\"NFPBroker - invalid worker command: {command}\")\n        return\n    with self.socket_lock:\n        log.debug(f\"NFPBroker - sending to worker '{msg}'\")\n        self.socket.send_multipart(msg)\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.send_to_client","title":"send_to_client(client, command, service, message)","text":"

    Send message to client.

    Source code in norfab\\core\\broker.py
    def send_to_client(self, client: str, command: str, service: str, message: list):\n    \"\"\"Send message to client.\"\"\"\n    # Stack routing and protocol envelopes to start of message\n    if command == NFP.RESPONSE:\n        msg = [client, b\"\", NFP.CLIENT, NFP.RESPONSE, service] + message\n    elif command == NFP.EVENT:\n        msg = [client, b\"\", NFP.CLIENT, NFP.EVENT, service] + message\n    else:\n        log.error(f\"NFPBroker - invalid client command: {command}\")\n        return\n    with self.socket_lock:\n        log.debug(f\"NFPBroker - sending to client '{msg}'\")\n        self.socket.send_multipart(msg)\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.process_worker","title":"process_worker(sender, msg)","text":"

    Process message received from worker.

    Source code in norfab\\core\\broker.py
    def process_worker(self, sender, msg):\n    \"\"\"Process message received from worker.\"\"\"\n    command = msg.pop(0)\n    worker = self.require_worker(sender)\n\n    if NFP.READY == command and not worker.is_ready():\n        service = msg.pop(0)\n        worker.service = self.require_service(service)\n        worker.ready = True\n        worker.start_keepalives()\n        worker.service.workers.append(worker)\n    elif NFP.RESPONSE == command and worker.is_ready():\n        client = msg.pop(0)\n        empty = msg.pop(0)\n        self.send_to_client(client, NFP.RESPONSE, worker.service.name, msg)\n    elif NFP.KEEPALIVE == command:\n        worker.keepaliver.received_heartbeat([worker.address] + msg)\n    elif NFP.DISCONNECT == command and worker.is_ready():\n        self.delete_worker(worker, False)\n    elif NFP.EVENT == command and worker.is_ready():\n        client = msg.pop(0)\n        empty = msg.pop(0)\n        self.send_to_client(client, NFP.EVENT, worker.service.name, msg)\n    elif not worker.is_ready():\n        self.delete_worker(worker, disconnect=True)\n    else:\n        log.error(f\"NFPBroker - invalid message: {msg}\")\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.require_worker","title":"require_worker(address)","text":"

    Finds the worker, creates if necessary.

    Source code in norfab\\core\\broker.py
    def require_worker(self, address):\n    \"\"\"Finds the worker, creates if necessary.\"\"\"\n    if not self.workers.get(address):\n        self.workers[address] = NFPWorker(\n            address=address,\n            socket=self.socket,\n            multiplier=self.multiplier,\n            keepalive=self.keepalive,\n            socket_lock=self.socket_lock,\n            log_level=self.log_level,\n        )\n        log.info(f\"NFPBroker - registered new worker {address}\")\n\n    return self.workers[address]\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.require_service","title":"require_service(name)","text":"

    Locates the service (creates if necessary).

    Source code in norfab\\core\\broker.py
    def require_service(self, name):\n    \"\"\"Locates the service (creates if necessary).\"\"\"\n    if not self.services.get(name):\n        service = NFPService(name)\n        self.services[name] = service\n        log.debug(f\"NFPBroker - registered new service {name}\")\n\n    return self.services[name]\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.process_client","title":"process_client(sender, msg)","text":"

    Process a request coming from a client.

    Source code in norfab\\core\\broker.py
    def process_client(self, sender, msg):\n    \"\"\"Process a request coming from a client.\"\"\"\n    command = msg.pop(0)\n    service = msg.pop(0)\n    target = msg.pop(0)\n    uuid = msg.pop(0)\n    data = msg.pop(0)\n\n    # check if valid command from client\n    if command not in NFP.client_commands:\n        message = f\"NFPBroker - Unsupported client command '{command}'\"\n        log.error(message)\n        self.send_to_client(\n            sender, NFP.RESPONSE, service, [message.encode(\"utf-8\")]\n        )\n    # Management Interface\n    elif service == b\"mmi.service.broker\":\n        self.mmi_service(sender, command, target, uuid, data)\n    elif service == b\"sid.service.broker\":\n        self.inventory_service(sender, command, target, uuid, data)\n    elif service == b\"fss.service.broker\":\n        self.file_sharing_service(sender, command, target, uuid, data)\n    else:\n        self.dispatch(\n            sender, command, self.require_service(service), target, uuid, data\n        )\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.filter_workers","title":"filter_workers(target, service)","text":"

    Helper function to filter workers

    Parameters:

    • target (bytes) - bytes string, workers target. Required.
    • service (NFPService) - NFPService object. Required.

    required Source code in norfab\core\broker.py
    def filter_workers(self, target: bytes, service: NFPService) -> list:\n    \"\"\"\n    Helper function to filter workers\n\n    :param target: bytest string, workers target\n    :param service: NFPService object\n    \"\"\"\n    ret = []\n    if not service.workers:\n        log.warning(\n            f\"NFPBroker - '{service.name}' has no active workers registered, try later\"\n        )\n        ret = []\n    elif target == b\"any\":\n        ret = [service.workers[random.randint(0, len(service.workers) - 1)]]\n    elif target == b\"all\":\n        ret = service.workers\n    elif target in self.workers:  # single worker\n        ret = [self.workers[target]]\n    else:  # target list of workers\n        try:\n            target = json.loads(target)\n            if isinstance(target, list):\n                for w in target:\n                    w = w.encode(\"utf-8\")\n                    if w in self.workers:\n                        ret.append(self.workers[w])\n                ret = list(set(ret))  # dedup workers\n        except Exception as e:\n            log.error(\n                f\"NFPBroker - Failed to load target '{target}' with error '{e}'\"\n            )\n    return ret\n
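    For illustration, these are the worker-targeting values that filter_workers above accepts from clients; the worker names are placeholders.

        import json

        target_any  = b"any"              # dispatch to one randomly chosen worker of the service
        target_all  = b"all"              # dispatch to every worker of the service
        target_one  = b"nornir-worker-1"  # a single worker, matched by its address
        target_list = json.dumps(["nornir-worker-1", "nornir-worker-2"]).encode("utf-8")  # subset of workers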
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.dispatch","title":"dispatch(sender, command, service, target, uuid, data)","text":"

    Dispatch requests to waiting workers where possible

    Parameters:

    • service - service object. Required.
    • target - string indicating workers addresses to dispatch to. Required.
    • msg - string with work request content. Required.

    required Source code in norfab\core\broker.py
    def dispatch(self, sender, command, service, target, uuid, data):\n    \"\"\"\n    Dispatch requests to waiting workers as possible\n\n    :param service: service object\n    :param target: string indicating workers addresses to dispatch to\n    :param msg: string with work request content\n    \"\"\"\n    log.debug(\n        f\"NFPBroker - dispatching request to workers: sender '{sender}', \"\n        f\"command '{command}', service '{service.name}', target '{target}'\"\n        f\"data '{data}', uuid '{uuid}'\"\n    )\n    self.purge_workers()\n    workers = self.filter_workers(target, service)\n\n    # handle case when service has no workers registered\n    if not workers:\n        message = f\"NFPBroker - {service.name} service failed to target workers '{target}'\"\n        log.error(message)\n        self.send_to_client(\n            sender,\n            NFP.RESPONSE,\n            service.name,\n            [uuid, b\"400\", message.encode(\"utf-8\")],\n        )\n    else:\n        # inform client that JOB dispatched\n        w_addresses = [w.address.decode(\"utf-8\") for w in workers]\n        self.send_to_client(\n            sender,\n            NFP.RESPONSE,\n            service.name,\n            [\n                uuid,\n                b\"202\",\n                json.dumps(\n                    {\n                        \"workers\": w_addresses,\n                        \"uuid\": uuid.decode(\"utf-8\"),\n                        \"target\": target.decode(\"utf-8\"),\n                        \"status\": \"DISPATCHED\",\n                        \"service\": service.name.decode(\"utf-8\"),\n                    }\n                ).encode(\"utf-8\"),\n            ],\n        )\n        # send job to workers\n        for worker in workers:\n            self.send_to_worker(worker, command, sender, uuid, data)\n
    "},{"location":"norfab_broker_reference/#norfab.core.broker.NFPBroker.mmi_service","title":"mmi_service(sender, command, target, uuid, data)","text":"

    Handle internal service according to 8/MMI specification

    Source code in norfab\\core\\broker.py
    def mmi_service(self, sender, command, target, uuid, data):\n    \"\"\"Handle internal service according to 8/MMI specification\"\"\"\n    log.debug(\n        f\"mmi.service.broker - processing request: sender '{sender}', \"\n        f\"command '{command}', target '{target}'\"\n        f\"data '{data}', uuid '{uuid}'\"\n    )\n    data = json.loads(data)\n    task = data.get(\"task\")\n    args = data.get(\"args\", [])\n    kwargs = data.get(\"kwargs\", {})\n    ret = f\"Unsupported task '{task}'\"\n    if task == \"show_workers\":\n        if self.workers:\n            ret = [\n                {\n                    \"name\": w.address.decode(\"utf-8\"),\n                    \"service\": w.service.name.decode(\"utf-8\"),\n                    \"status\": \"alive\" if w.keepaliver.is_alive() else \"dead\",\n                    \"holdtime\": str(w.keepaliver.show_holdtime()),\n                    \"keepalives tx/rx\": f\"{w.keepaliver.keepalives_send} / {w.keepaliver.keepalives_received}\",\n                    \"alive (s)\": str(w.keepaliver.show_alive_for()),\n                }\n                for k, w in self.workers.items()\n            ]\n            # filter reply\n            service = kwargs.get(\"service\")\n            status = kwargs.get(\"status\")\n            if service and service != \"all\":\n                ret = [w for w in ret if w[\"service\"] == service]\n            if status in [\"alive\", \"dead\"]:\n                ret = [w for w in ret if w[\"status\"] == status]\n            if not ret:\n                ret = [{\"name\": \"\", \"service\": \"\", \"status\": \"\"}]\n        else:\n            ret = [{\"name\": \"\", \"service\": \"\", \"status\": \"\"}]\n    elif task == \"show_broker\":\n        ret = {\n            \"address\": self.socket.getsockopt_string(zmq.LAST_ENDPOINT),\n            \"status\": \"active\",\n            \"multiplier\": self.multiplier,\n            \"keepalive\": self.keepalive,\n            \"workers count\": len(self.workers),\n            \"services count\": len(self.services),\n            \"base_dir\": self.base_dir,\n        }\n    reply = json.dumps(ret).encode(\"utf-8\")\n    self.send_to_client(\n        sender, NFP.RESPONSE, b\"mmi.service.broker\", [uuid, b\"200\", reply]\n    )\n
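    For illustration, a sketch of the JSON payloads the mmi.service.broker handler above understands; the service filter value is a placeholder.

        import json

        # list workers, optionally filtered by service name and keepalive status
        show_workers = json.dumps({
            "task": "show_workers",
            "args": [],
            "kwargs": {"service": "nornir", "status": "alive"},
        }).encode("utf-8")

        # broker summary - address, keepalive settings, workers and services counts
        show_broker = json.dumps({"task": "show_broker", "args": [], "kwargs": {}}).encode("utf-8")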
    "},{"location":"norfab_cli_overview/","title":"NORFAB CLI","text":"

    The NorFab CLI is designed as a modal operating system. The term modal describes a system that has various modes of operation, each having its own domain of operation. The CLI uses a hierarchical structure for the modes.

    You can access a lower-level mode only from a higher-level mode. For example, to access the Nornir mode, you must be in the privileged EXEC mode. Each mode is used to accomplish particular tasks and has a specific set of commands that are available in that mode. For example, to configure a router interface, you must be in the Nornir configuration mode. All configurations that you enter in a configuration mode apply only to that function.

    The NorFab CLI is built using the PICLE package.

    It is important to remember that in the PICLE shell, when you enter a command, the command is executed immediately. If you enter an incorrect command in a production environment, it can negatively affect that environment.

    "},{"location":"norfab_client_reference/","title":"Client","text":""},{"location":"norfab_client_reference/#norfab.core.client--cudos","title":"CUDOS","text":"

    Inspired by Majordomo Protocol Client API, ZeroMQ, Python version.

    Original MDP/Client spec

    Location: http://rfc.zeromq.org/spec:7.

    Author: Min RK benjaminrk@gmail.com

    Based on Java example by Arkadiusz Orzechowski

    "},{"location":"norfab_client_reference/#norfab.core.client.NFPClient","title":"NFPClient(broker, name, log_level='WARNING', exit_event=None, event_queue=None)","text":"

    Bases: object

    NORFAB Protocol Client API.

    Parameters:

    • broker - str, broker endpoint e.g. tcp://127.0.0.1:5555. Required.
    • name - str, client name, default is NFPClient. Required.

    required Source code in norfab\core\client.py
    def __init__(\n    self, broker, name, log_level=\"WARNING\", exit_event=None, event_queue=None\n):\n    log.setLevel(log_level.upper())\n    self.name = name\n    self.zmq_name = f\"{self.name}-{uuid4().hex}\"\n    self.broker = broker\n    self.ctx = zmq.Context()\n    self.poller = zmq.Poller()\n    self.reconnect_to_broker()\n    self.base_dir = f\"__norfab__/files/client/{self.name}/\"\n    self.base_dir_jobs = os.path.join(self.base_dir, \"jobs\")\n    self.events_dir = os.path.join(self.base_dir, \"events\")\n\n    # create all the folders\n    os.makedirs(self.base_dir, exist_ok=True)\n    os.makedirs(self.base_dir_jobs, exist_ok=True)\n    os.makedirs(self.events_dir, exist_ok=True)\n\n    # create queue file\n    self.queue_filename = os.path.join(\n        self.base_dir_jobs, f\"{self.name}.jobsqueue.txt\"\n    )\n    if not os.path.exists(self.queue_filename):\n        with open(self.queue_filename, \"w\") as f:\n            pass\n\n    self.exit_event = exit_event or threading.Event()\n    self.recv_queue = queue.Queue(maxsize=0)\n    self.event_queue = event_queue or queue.Queue(maxsize=1000)\n\n    # start receive thread\n    self.recv_thread = threading.Thread(\n        target=recv, daemon=True, name=f\"{self.name}_recv_thread\", args=(self,)\n    ).start()\n
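    A minimal instantiation sketch, assuming a broker is already listening on the endpoint below; the client name is arbitrary.

        from norfab.core.client import NFPClient

        client = NFPClient("tcp://127.0.0.1:5555", name="my-client", log_level="WARNING")
        # on start the client creates its __norfab__/files/client/<name>/ jobs and events
        # folders and spins up the background receive thread shown in the source above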
    "},{"location":"norfab_client_reference/#norfab.core.client.NFPClient.reconnect_to_broker","title":"reconnect_to_broker()","text":"

    Connect or reconnect to broker

    Source code in norfab\\core\\client.py
    def reconnect_to_broker(self):\n    \"\"\"Connect or reconnect to broker\"\"\"\n    if self.broker_socket:\n        self.poller.unregister(self.broker_socket)\n        self.broker_socket.close()\n    self.broker_socket = self.ctx.socket(zmq.DEALER)\n    self.broker_socket.setsockopt_unicode(zmq.IDENTITY, self.zmq_name, \"utf8\")\n    self.broker_socket.linger = 0\n    self.broker_socket.connect(self.broker)\n    self.poller.register(self.broker_socket, zmq.POLLIN)\n    log.debug(f\"{self.name} - client connected to broker at '{self.broker}'\")\n    self.stats_reconnect_to_broker += 1\n
    "},{"location":"norfab_client_reference/#norfab.core.client.NFPClient.send_to_broker","title":"send_to_broker(command, service, workers, uuid, request)","text":"

    Send message to broker.

    Source code in norfab\\core\\client.py
    def send_to_broker(self, command, service, workers, uuid, request):\n    \"\"\"Send message to broker.\"\"\"\n    if command == NFP.POST:\n        msg = [b\"\", NFP.CLIENT, command, service, workers, uuid, request]\n    elif command == NFP.GET:\n        msg = [b\"\", NFP.CLIENT, command, service, workers, uuid, request]\n    else:\n        log.error(\n            f\"{self.name} - cannot send '{command}' to broker, command unsupported\"\n        )\n        return\n\n    log.debug(f\"{self.name} - sending '{msg}'\")\n\n    self.broker_socket.send_multipart(msg)\n    self.stats_send_to_broker += 1\n
    "},{"location":"norfab_client_reference/#norfab.core.client.NFPClient.rcv_from_broker","title":"rcv_from_broker(command, service, uuid)","text":"

    Wait for response from broker.

    Source code in norfab\\core\\client.py
    def rcv_from_broker(self, command, service, uuid):\n    \"\"\"Wait for response from broker.\"\"\"\n    retries = 3\n    while retries > 0:\n        # check if need to stop\n        if self.exit_event.is_set():\n            break\n        try:\n            msg = self.recv_queue.get(block=True, timeout=3)\n            self.recv_queue.task_done()\n        except queue.Empty:\n            if retries:\n                log.warning(\n                    f\"{self.name} - '{uuid}:{service}:{command}' job, \"\n                    f\"no reply from broker '{self.broker}', reconnecting\"\n                )\n                self.reconnect_to_broker()\n            retries -= 1\n            continue\n\n        (\n            empty,\n            reply_header,\n            reply_command,\n            reply_service,\n            reply_uuid,\n            reply_status,\n            reply_task_result,\n        ) = msg\n\n        # find message from recv queue for given uuid\n        if reply_uuid == uuid:\n            assert (\n                reply_header == NFP.CLIENT\n            ), f\"Was expecting client header '{NFP.CLIENT}' received '{reply_header}'\"\n            assert (\n                reply_command == command\n            ), f\"Was expecting reply command '{command}' received '{reply_command}'\"\n            assert (\n                reply_service == service\n            ), f\"Was expecting reply from '{service}' but received reply from '{reply_service}' service\"\n\n            return reply_status, reply_task_result\n        else:\n            self.recv_queue.put(msg)\n    else:\n        log.error(\n            f\"{self.name} - '{uuid}:{service}:{command}' job, \"\n            f\"client {retries} retries attempts exceeded\"\n        )\n        return b\"408\", b'{\"status\": \"Request Timeout\"}'\n
    "},{"location":"norfab_client_reference/#norfab.core.client.NFPClient.post","title":"post(service, task, args=None, kwargs=None, workers='all', uuid=None, timeout=600)","text":"

    Send job request to broker.

    Returns a dictionary with status, workers and errors keys, where workers contains the list of workers that acknowledged the POST request.

    Source code in norfab\\core\\client.py
    def post(\n    self,\n    service: str,\n    task: str,\n    args: list = None,\n    kwargs: dict = None,\n    workers: str = \"all\",\n    uuid: hex = None,\n    timeout: int = 600,\n):\n    \"\"\"\n    Send job request to broker.\n\n    Return dictionary with ``status``, ``workers``, ``errors`` keys\n    containing list of workers acknowledged POST request.\n    \"\"\"\n    uuid = uuid or uuid4().hex\n    args = args or []\n    kwargs = kwargs or {}\n    ret = {\"status\": b\"200\", \"workers\": [], \"errors\": []}\n\n    if not isinstance(service, bytes):\n        service = service.encode(\"utf-8\")\n\n    if not isinstance(uuid, bytes):\n        uuid = uuid.encode(\"utf-8\")\n\n    workers = self._make_workers(workers)\n\n    request = json.dumps(\n        {\"task\": task, \"kwargs\": kwargs or {}, \"args\": args or []}\n    ).encode(\"utf-8\")\n\n    # run POST response loop\n    start_time = time.time()\n    while timeout > time.time() - start_time:\n        # check if need to stop\n        if self.exit_event.is_set():\n            return ret\n        self.send_to_broker(\n            NFP.POST, service, workers, uuid, request\n        )  # 1 send POST to broker\n        status, post_response = self.rcv_from_broker(\n            NFP.RESPONSE, service, uuid\n        )  # 2 receive RESPONSE from broker\n        if status == b\"202\":  # 3 go over RESPONSE status and decide what to do\n            break\n        else:\n            msg = f\"{self.name} - '{uuid}' job, POST Request not accepted by broker '{post_response}'\"\n            log.error(msg)\n            ret[\"errors\"].append(msg)\n            ret[\"status\"] = status\n            return ret\n    else:\n        msg = f\"{self.name} - '{uuid}' job, broker POST Request Timeout\"\n        log.error(msg)\n        ret[\"errors\"].append(msg)\n        ret[\"status\"] = b\"408\"\n        return ret\n\n    # get a list of workers where job was dispatched to\n    post_response = json.loads(post_response)\n    workers_dispatched = set(post_response[\"workers\"])\n    log.debug(\n        f\"{self.name} - broker dispatched job '{uuid}' POST request to workers {workers_dispatched}\"\n    )\n\n    # wait workers to ACK POSTed job\n    start_time = time.time()\n    workers_acked = set()\n    while timeout > time.time() - start_time:\n        # check if need to stop\n        if self.exit_event.is_set():\n            return ret\n        status, response = self.rcv_from_broker(NFP.RESPONSE, service, uuid)\n        response = json.loads(response)\n        if status == b\"202\":  # ACCEPTED\n            log.debug(\n                f\"{self.name} - '{uuid}' job, acknowledged by worker '{response}'\"\n            )\n            workers_acked.add(response[\"worker\"])\n            if workers_acked == workers_dispatched:\n                break\n        else:\n            msg = (\n                f\"{self.name} - '{uuid}:{service}:{task}' job, \"\n                f\"unexpected POST request status '{status}', response '{response}'\"\n            )\n            log.error(msg)\n            ret[\"errors\"].append(msg)\n    else:\n        msg = (\n            f\"{self.name} - '{uuid}' job, POST request timeout exceeded, these workers did not \"\n            f\"acknowledge the job {workers_dispatched - workers_acked}\"\n        )\n        log.error(msg)\n        ret[\"errors\"].append(msg)\n        ret[\"status\"] = b\"408\"\n\n    ret[\"workers\"] = list(workers_acked)\n    ret[\"status\"] = ret[\"status\"].decode(\"utf-8\")\n\n    
log.debug(f\"{self.name} - '{uuid}' job POST request completed '{ret}'\")\n\n    return ret\n
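    A usage sketch for post; the service and task names and the command are assumptions for illustration, and client is an already connected NFPClient instance.

        ack = client.post(
            service="nornir",
            task="cli",
            kwargs={"commands": ["show version"]},
            workers="all",
            timeout=60,
        )
        # ack carries "status", "workers" (workers that acknowledged the job) and "errors"
        print(ack["status"], ack["workers"], ack["errors"])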
    "},{"location":"norfab_client_reference/#norfab.core.client.NFPClient.get","title":"get(service, task, args=None, kwargs=None, workers='all', uuid=None, timeout=600)","text":"

    Send job reply message to broker requesting job results.

    Parameters:

    • service (str) - mandatory, service name to target. Required.
    • task (str) - mandatory, service task name to run. Required.
    • args (list) - optional, list of positional arguments for the task. Default: None.
    • kwargs (dict) - optional, dictionary of key-word arguments for the task. Default: None.
    • workers (str) - optional, workers to target: all, any, or a list of workers' names. Default: 'all'.
    • uuid (hex) - optional, unique job identifier. Default: None.
    • timeout (int) - optional, job timeout in seconds, for how long the client waits for the job result before giving up. Default: 600.

    Returns a dictionary of status, results and errors keys, where the results key is a dictionary keyed by workers' names, and errors is a list of error strings.

    600 Source code in norfab\core\client.py
    def get(\n    self,\n    service: str,\n    task: str,\n    args: list = None,\n    kwargs: dict = None,\n    workers: str = \"all\",\n    uuid: hex = None,\n    timeout: int = 600,\n):\n    \"\"\"S\n    end job reply message to broker requesting job results.\n\n    :param service: mandatory, service name to target\n    :param task: mandatory, service task name to run\n    :param args: optional, list of position argument for the task\n    :param kwargs: optional, dictionary of key-word arguments for the task\n    :param workers: optional, workers to target - ``all``, ``any``, or\n        list of workers names\n    :param uuid: optional, unique job identifier\n    :param timeout: optional, job timeout in seconds, for how long client\n        waits for job result before giving up\n\n    Returns dictionary of ``status``, ``results`` and ``errors`` keys,\n    where ``results`` key is a dictionary keyed by workers' names, and\n    ``errors`` is a list of error strings.\n    \"\"\"\n    uuid = uuid or uuid4().hex\n    args = args or []\n    kwargs = kwargs or {}\n    wkrs = {\n        \"requested\": workers,\n        \"done\": set(),\n        \"dispatched\": set(),\n        \"pending\": set(),\n    }\n    ret = {\"status\": b\"200\", \"results\": {}, \"errors\": [], \"workers\": wkrs}\n\n    if not isinstance(service, bytes):\n        service = service.encode(\"utf-8\")\n\n    if not isinstance(uuid, bytes):\n        uuid = uuid.encode(\"utf-8\")\n\n    workers = self._make_workers(workers)\n\n    request = json.dumps(\n        {\"task\": task, \"kwargs\": kwargs or {}, \"args\": args or []}\n    ).encode(\"utf-8\")\n\n    # run GET response loop\n    start_time = time.time()\n    while timeout > time.time() - start_time:\n        # check if need to stop\n        if self.exit_event.is_set():\n            return None\n        # dispatch GET request to workers\n        self.send_to_broker(NFP.GET, service, workers, uuid, request)\n        status, get_response = self.rcv_from_broker(NFP.RESPONSE, service, uuid)\n        ret[\"status\"] = status\n        # received actual GET request results from broker e.g. 
MMI, SID or FSS services\n        if status == b\"200\":\n            ret[\"results\"] = get_response.decode(\"utf-8\")\n            break\n        # received DISPATCH response from broker\n        if status != b\"202\":\n            msg = f\"{status}, {self.name} job '{uuid}' GET Request not accepted by broker '{get_response}'\"\n            log.error(msg)\n            ret[\"errors\"].append(msg)\n            break\n        get_response = json.loads(get_response)\n        wkrs[\"dispatched\"] = set(get_response[\"workers\"])\n        # collect GET responses from individual workers\n        workers_responded = set()\n        while timeout > time.time() - start_time:\n            # check if need to stop\n            if self.exit_event.is_set():\n                return None\n            status, response = self.rcv_from_broker(NFP.RESPONSE, service, uuid)\n            log.debug(\n                f\"{self.name} - job '{uuid}' response from worker '{response}'\"\n            )\n            response = json.loads(response)\n            if status == b\"200\":  # OK\n                ret[\"results\"].update(response)\n                log.debug(\n                    f\"{self.name} - job '{uuid}' results returned by worker '{response}'\"\n                )\n                for w in response.keys():\n                    wkrs[\"done\"].add(w)\n                    workers_responded.add(w)\n                    if w in wkrs[\"pending\"]:\n                        wkrs[\"pending\"].remove(w)\n                if wkrs[\"done\"] == wkrs[\"dispatched\"]:\n                    break\n            elif status == b\"300\":  # PENDING\n                # set status to pending if at least one worker is pending\n                ret[\"status\"] = b\"300\"\n                wkrs[\"pending\"].add(response[\"worker\"])\n                workers_responded.add(response[\"worker\"])\n            else:\n                if response.get(\"worker\"):\n                    workers_responded.add(response[\"worker\"])\n                msg = (\n                    f\"{self.name} - '{uuid}:{service}:{task}' job, \"\n                    f\"unexpected GET Response status '{status}', response '{response}'\"\n                )\n                log.error(msg)\n                ret[\"errors\"].append(msg)\n            if workers_responded == wkrs[\"dispatched\"]:\n                break\n        if wkrs[\"done\"] == wkrs[\"dispatched\"]:\n            break\n        time.sleep(0.2)\n    else:\n        msg = f\"{self.name} - '{uuid}' job, broker {timeout}s GET request timeout expired\"\n        log.error(msg)\n        ret[\"errors\"].append(msg)\n        ret[\"status\"] = b\"408\"\n\n    ret[\"status\"] = ret[\"status\"].decode(\"utf-8\")\n\n    return ret\n
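    A usage sketch for get, assuming the job was posted earlier with the same service, task and uuid; the service, task, command and job_uuid values are placeholders.

        result = client.get(
            service="nornir",
            task="cli",
            kwargs={"commands": ["show version"]},
            workers="all",
            uuid=job_uuid,  # uuid of the previously posted job
            timeout=120,
        )
        # "results" is keyed by worker name, "errors" is a list of error strings
        for worker_name, worker_result in result["results"].items():
            print(worker_name, worker_result)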
    "},{"location":"norfab_client_reference/#norfab.core.client.NFPClient.get_iter","title":"get_iter(service, task, args=None, kwargs=None, workers='all', uuid=None, timeout=600)","text":"

    Send job reply message to broker requesting job results.

    Source code in norfab\\core\\client.py
    def get_iter(\n    self,\n    service: str,\n    task: str,\n    args: list = None,\n    kwargs: dict = None,\n    workers: str = \"all\",\n    uuid: hex = None,\n    timeout: int = 600,\n):\n    \"\"\"Send job reply message to broker requesting job results.\"\"\"\n    uuid = uuid or uuid4().hex\n    args = args or []\n    kwargs = kwargs or {}\n\n    if not isinstance(service, bytes):\n        service = service.encode(\"utf-8\")\n\n    if not isinstance(uuid, bytes):\n        uuid = uuid.encode(\"utf-8\")\n\n    workers = self._make_workers(workers)\n\n    request = json.dumps(\n        {\"task\": task, \"kwargs\": kwargs or {}, \"args\": args or []}\n    ).encode(\"utf-8\")\n\n    # run GET response loop\n    start_time = time.time()\n    workers_done = set()\n    while timeout > time.time() - start_time:\n        # check if need to stop\n        if self.exit_event.is_set():\n            break\n        # dispatch GET request to workers\n        self.send_to_broker(NFP.GET, service, workers, uuid, request)\n        status, get_response = self.rcv_from_broker(NFP.RESPONSE, service, uuid)\n        # received DISPATCH response from broker\n        if status != b\"202\":\n            msg = f\"{status}, {self.name} job '{uuid}' GET Request not accepted by broker '{get_response}'\"\n            log.error(msg)\n            break\n        get_response = json.loads(get_response)\n        workers_dispatched = set(get_response[\"workers\"])\n        # collect GET responses from workers\n        workers_responded = set()\n        while timeout > time.time() - start_time:\n            # check if need to stop\n            if self.exit_event.is_set():\n                break\n            status, response = self.rcv_from_broker(NFP.RESPONSE, service, uuid)\n            log.debug(\n                f\"{self.name} - job '{uuid}' response from worker '{response}'\"\n            )\n            response = json.loads(response)\n            if status == b\"200\":  # OK\n                log.debug(\n                    f\"{self.name} - job '{uuid}' results returned by worker '{response}'\"\n                )\n                yield response\n                for w in response.keys():\n                    workers_done.add(w)\n                    workers_responded.add(w)\n                if workers_done == workers_dispatched:\n                    break\n            elif status == b\"300\":  # PENDING\n                workers_responded.add(response[\"worker\"])\n            else:\n                msg = f\"{self.name} - unexpected GET Response status '{status}', response '{response}'\"\n                log.error(msg)\n                ret[\"errors\"].append(msg)\n            if workers_responded == workers_dispatched:\n                break\n        if workers_done == workers_dispatched:\n            break\n        time.sleep(0.2)\n    else:\n        msg = f\"408, {self.name} job '{uuid}' broker GET Request Timeout\"\n        log.error(msg)\n
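    A sketch of streaming results with get_iter instead of waiting for all workers to finish; the same placeholder service, task, command and job_uuid as above are assumed.

        for worker_reply in client.get_iter(
            service="nornir",
            task="cli",
            kwargs={"commands": ["show version"]},
            uuid=job_uuid,
        ):
            print(worker_reply)  # dictionary keyed by the replying worker's name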
    "},{"location":"norfab_client_reference/#norfab.core.client.NFPClient.fetch_file","title":"fetch_file(url, destination=None, chunk_size=250000, pipiline=10, timeout=600, read=False)","text":"

    Function to download file from Broker File Sharing Service.

    Parameters:

    Name Type Description Default url str

    (str), path to file relative to base_dir

    required destination str

    (str), if provided destination to save file, returns file content otherwise

    None Source code in norfab\\core\\client.py
    def fetch_file(\n    self,\n    url: str,\n    destination: str = None,\n    chunk_size: int = 250000,\n    pipiline: int = 10,\n    timeout: int = 600,\n    read: bool = False,\n):\n    \"\"\"\n    Function to download file from Broker File Sharing Service.\n\n    :param url: (str), path to file relative to ``base_dir``\n    :param destination: (str), if provided destination to save file,\n        returns file content otherwise\n    \"\"\"\n    uuid = str(uuid4().hex).encode(\"utf-8\")\n    total = 0  # Total bytes received\n    chunks = 0  # Total chunks received\n    offset = 0  # Offset of next chunk request\n    credit = pipiline  # Up to PIPELINE chunks in transit\n    service = b\"fss.service.broker\"\n    workers = b\"any\"\n    reply = \"\"\n    status = \"200\"\n    downloaded = False\n    md5hash = None\n\n    # define file destination\n    if destination is None:\n        destination = os.path.join(\n            self.base_dir, \"fetchedfiles\", *os.path.split(url.replace(\"nf://\", \"\"))\n        )\n\n    # make sure all destination directories exist\n    os.makedirs(os.path.split(destination)[0], exist_ok=True)\n\n    # get file details\n    request = json.dumps({\"task\": \"file_details\", \"kwargs\": {\"url\": url}}).encode(\n        \"utf-8\"\n    )\n    self.send_to_broker(NFP.GET, service, workers, uuid, request)\n    rcv_status, file_details = self.rcv_from_broker(NFP.RESPONSE, service, uuid)\n    file_details = json.loads(file_details)\n\n    # check if file already downloaded\n    if os.path.isfile(destination):\n        file_hash = hashlib.md5()\n        with open(destination, \"rb\") as f:\n            chunk = f.read(8192)\n            while chunk:\n                file_hash.update(chunk)\n                chunk = f.read(8192)\n        md5hash = file_hash.hexdigest()\n        downloaded = md5hash == file_details[\"md5hash\"]\n        log.debug(f\"{self.name} - file already downloaded, nothing to do\")\n\n    # fetch file content from broker and save to local file\n    if file_details[\"exists\"] is True and downloaded is False:\n        file_hash = hashlib.md5()\n        with open(destination, \"wb\") as dst_file:\n            start_time = time.time()\n            while timeout > time.time() - start_time:\n                # check if need to stop\n                if self.exit_event.is_set():\n                    return \"400\", \"\"\n                # ask for chunks\n                while credit:\n                    request = json.dumps(\n                        {\n                            \"task\": \"fetch_file\",\n                            \"kwargs\": {\n                                \"offset\": offset,\n                                \"chunk_size\": chunk_size,\n                                \"url\": url,\n                            },\n                        }\n                    ).encode(\"utf-8\")\n                    self.send_to_broker(NFP.GET, service, workers, uuid, request)\n                    offset += chunk_size\n                    credit -= 1\n                # receive chunks from broker\n                status, chunk = self.rcv_from_broker(NFP.RESPONSE, service, uuid)\n                log.debug(\n                    f\"{self.name} - status '{status}', chunk '{chunks}', downloaded '{total}'\"\n                )\n                dst_file.write(chunk)\n                file_hash.update(chunk)\n                chunks += 1\n                credit += 1\n                size = len(chunk)\n                total += size\n                if 
size < chunk_size:\n                    break  # Last chunk received; exit\n            else:\n                reply = \"File download failed - timeout\"\n                status = \"408\"\n        # verify md5hash\n        md5hash = file_hash.hexdigest()\n    elif file_details[\"exists\"] is False:\n        reply = \"File download failed - file not found\"\n        status = \"404\"\n\n    # decide on what to reply and status\n    if file_details[\"exists\"] is not True:\n        reply = reply\n    elif md5hash != file_details[\"md5hash\"]:\n        reply = \"File download failed - MD5 hash mismatch\"\n        status = \"417\"\n    elif read:\n        with open(destination, \"r\", encoding=\"utf-8\") as f:\n            reply = f.read()\n    else:\n        reply = destination\n    # decode status\n    if isinstance(status, bytes):\n        status = status.decode(\"utf-8\")\n\n    return status, reply\n
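    A minimal usage sketch, assuming a connected NFPClient instance named client and a file published on the broker under a hypothetical nf://templates/config.txt path:

    # fetch file content into memory; with read=False the OS path to the saved copy is returned instead
    status, content = client.fetch_file("nf://templates/config.txt", read=True)
    if status == "200":
        print(content)
    else:
        print(f"download failed, status {status}: {content}")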
    "},{"location":"norfab_client_reference/#norfab.core.client.NFPClient.run_job","title":"run_job(service, task, uuid=None, args=None, kwargs=None, workers='all', timeout=600, retry=10)","text":"

    Run job and return results produced by workers.

    Parameters:

    Name Type Description Default service str

    str, service name to send request to

    required task str

    str, task name to run for given service

    required uuid str

    (str) Job ID to use

    None args list

    list, task arguments

    None kwargs dict

    dict, task key-word arguments

    None workers str

    str or list, worker names to target

    'all' timeout int

    overall job timeout in seconds

    600 retry

    number of times to try and GET job results

    10 Source code in norfab\\core\\client.py
    def run_job(\n    self,\n    service: str,\n    task: str,\n    uuid: str = None,\n    args: list = None,\n    kwargs: dict = None,\n    workers: str = \"all\",\n    timeout: int = 600,\n    retry=10,\n):\n    \"\"\"\n    Run job and return results produced by workers.\n\n    :param service: str, service name to send request to\n    :param task: str, task name to run for given service\n    :param uuid: (str) Job ID to use\n    :param args: list, task arguments\n    :param kwargs: dict, task key-word arguments\n    :param workers: str or list, worker names to target\n    :param timeout: overall job timeout in seconds\n    :param retry: number of times to try and GET job results\n    \"\"\"\n    uuid = uuid or uuid4().hex\n    start_time = int(time.time())\n\n    # POST job to workers\n    post_result = self.post(service, task, args, kwargs, workers, uuid, timeout)\n    if post_result[\"status\"] != \"200\":\n        log.error(\n            f\"{self.name}:run_job - {service}:{task} POST status \"\n            f\"to '{workers}' workers is not 200 - '{post_result}'\"\n        )\n        return None\n\n    remaining_timeout = timeout - (time.time() - start_time)\n    get_timeout = remaining_timeout / retry\n\n    # GET job results\n    while retry:\n        get = self.get(\n            service, task, [], {}, post_result[\"workers\"], uuid, get_timeout\n        )\n        if self.exit_event.is_set():\n            break\n        elif get[\"status\"] == \"300\":  # PENDING\n            retry -= 1\n            log.debug(\n                f\"{self.name}:run_job - {service}:{task}:{uuid} GET \"\n                f\"results pending, keep waiting\"\n            )\n            continue\n        elif get[\"status\"] == \"408\":  # TIMEOUT\n            retry -= 1\n            log.debug(\n                f\"{self.name}:run_job - {service}:{task}:{uuid} GET \"\n                f\"results {get_timeout}s timeout expired, keep waiting\"\n            )\n            continue\n        elif get[\"status\"] in [\"200\", \"202\"]:  # OK\n            return get[\"results\"]\n        else:\n            log.error(\n                f\"{self.name}:run_job - {service}:{task}:{uuid} \"\n                f\"stopping, GET returned unexpected results - '{get}'\"\n            )\n            return None\n
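    A usage sketch, assuming a connected NFPClient named client and Nornir service workers able to run the cli task:

    results = client.run_job(
        service="nornir",
        task="cli",
        kwargs={"commands": ["show version"]},
        workers="all",
        timeout=120,
    )
    if results is None:
        print("job failed, timed out or was interrupted")
    else:
        # results is a dictionary keyed by the names of the workers that replied
        for worker_name, worker_result in results.items():
            print(worker_name, worker_result)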
    "},{"location":"norfab_client_reference/#norfab.core.client.NFPClient.run_job_iter","title":"run_job_iter(service, task, uuid=None, args=None, kwargs=None, workers='all', timeout=600)","text":"

    The iterator version of run_job returns job results from workers progressively as they respond, rather than waiting for all workers to respond first. This gives the client an interactive experience where job results are presented to the user as soon as they become available.

    Parameters:

    Name Type Description Default service str

    str, service name to send request to

    required task str

    str, task name to run for given service

    required uuid str

    (str) Job ID to use

    None args list

    list, task arguments

    None kwargs dict

    dict, task key-word arguments

    None workers str

    str or list, worker names to target

    'all' Source code in norfab\\core\\client.py
    def run_job_iter(\n    self,\n    service: str,\n    task: str,\n    uuid: str = None,\n    args: list = None,\n    kwargs: dict = None,\n    workers: str = \"all\",\n    timeout: int = 600,\n):\n    \"\"\"\n    Iter run_job allows to return job results from workers progressively\n    as they are responded, rather than waiting for workers to respond first.\n    This should allow to client an interactive experience for the user where\n    job results would be presented as soon as they are available.\n\n    :param service: str, service name to send request to\n    :param task: str, task name to run for given service\n    :param uuid: (str) Job ID to use\n    :param args: list, task arguments\n    :param kwargs: dict, task key-word arguments\n    :param workers: str or list, worker names to target\n    \"\"\"\n    uuid = uuid or uuid4().hex\n\n    # POST job to workers\n    post_result = self.post(service, task, args, kwargs, workers, uuid, timeout)\n\n    # GET job results\n    for result in self.get_iter(\n        service, task, [], {}, post_result[\"workers\"], uuid, timeout\n    ):\n        yield result\n
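    A usage sketch of the iterator variant, under the same assumptions as for run_job above; each yielded item is a per-worker results dictionary emitted as soon as that worker replies:

    for reply in client.run_job_iter(
        service="nornir",
        task="cli",
        kwargs={"commands": ["show clock"]},
    ):
        for worker_name, worker_result in reply.items():
            print(worker_name, worker_result)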
    "},{"location":"norfab_client_reference/#norfab.core.client.event_filename","title":"event_filename(suuid, events_dir)","text":"

    Returns freshly allocated event filename for given UUID str

    Source code in norfab\\core\\client.py
    def event_filename(suuid: str, events_dir: str):\n    \"\"\"Returns freshly allocated event filename for given UUID str\"\"\"\n    suuid = suuid.decode(\"utf-8\") if isinstance(suuid, bytes) else suuid\n    return os.path.join(events_dir, f\"{suuid}.json\")\n
    "},{"location":"norfab_client_reference/#norfab.core.client.recv","title":"recv(client)","text":"

    Thread to process messages received from the broker.

    Source code in norfab\\core\\client.py
    def recv(client):\n    \"\"\"Thread to process receive messages from broker.\"\"\"\n    while not client.exit_event.is_set():\n        # Poll socket for messages every timeout interval\n        try:\n            items = client.poller.poll(1000)\n        except KeyboardInterrupt:\n            break  # Interrupted\n        except:\n            continue\n        if items:\n            msg = client.broker_socket.recv_multipart()\n            log.debug(f\"{client.name} - received '{msg}'\")\n            if msg[2] == NFP.EVENT:\n                client.event_queue.put(msg)\n                client.stats_recv_event_from_broker += 1\n            else:\n                client.recv_queue.put(msg)\n                client.stats_recv_from_broker += 1\n
    "},{"location":"norfab_exceptions_reference/","title":"Exceptions","text":""},{"location":"norfab_exceptions_reference/#norfab.core.exceptions.UnsupportedPluginError","title":"UnsupportedPluginError","text":"

    Bases: Exception

    Exception to raise when specified plugin not supported

    "},{"location":"norfab_exceptions_reference/#norfab.core.exceptions.UnsupportedServiceError","title":"UnsupportedServiceError","text":"

    Bases: Exception

    Exception to raise when specified service not supported

    "},{"location":"norfab_getting_started/","title":"Getting Started","text":"

    Once NorFab is installed, the next step is to create a folder that will hold your environment and to start creating inventory files with the required configuration.

    Create a norfab folder and inside of it create an inventory.yaml file with this content:

    inventory.yaml
    broker: # (1)!\n  endpoint: \"tcp://127.0.0.1:5555\" # (2)!\n\nworkers: # (3)!\n  nornir-*: # (4)!\n    - nornir/common.yaml   \n  nornir-worker-1: # (5)!\n    - nornir/nornir-worker-1.yaml\n\ntopology: # (6)!\n  broker: True # (7)!\n  workers: # (8)!\n    - nornir-worker-1\n
    1. Broker configuration section
    2. URL to listen for connections on - localhost port 5555 in this case
    3. Workers configuration section
    4. Glob pattern that will match all workers with nornir- in the name and map the common.yaml inventory to each such worker
    5. Worker definition that maps an inventory file to the specific worker named nornir-worker-1
    6. Topology section to define what components to run
    7. Start broker process
    8. List of worker names to start processes for

    In this example we are working with the Nornir service.

    Create a nornir folder and inside of it create two files.

    The first file, common.yaml, holds configuration common to all Nornir service workers:

    common.yaml
    service: nornir # (1)!\nbroker_endpoint: \"tcp://127.0.0.1:5555\" # (2)!\n\n# next comes Nornir inventory and configuration\nrunner: # (3)!\n  plugin: RetryRunner\ndefault: {} # (4)!\ngroups: {} # (5)!\n
    1. Name of the service this worker is hosting
    2. Broker URL to initiate connections with
    3. Nornir runner plugin configuration
    4. Nornir default data section
    5. Nornir groups definition section

    The second file is specific to the worker named nornir-worker-1 and holds its Nornir inventory data:

    nornir-worker-1.yaml
    hosts: \n  R1:\n    hostname: r1.lab.local\n    platform: cisco_ios\n    username: developer\n    password: secretpassword\n  R2:\n    hostname: 10.0.0.2\n    platform: cisco_ios\n    username: developer\n    password: secretpassword\n

    This is how the file structure will look:

    \u2514\u2500\u2500\u2500norfab\n    \u2502   inventory.yaml\n    \u2502\n    \u2514\u2500\u2500\u2500nornir\n            common.yaml\n            nornir-worker-1.yaml\n

    Now you are ready to start the NorFab Interactive Command Line Shell Client - NFCLI. Open a terminal window, navigate to the folder where inventory.yaml is located and start NFCLI:

    C:\\>cd norfab\nC:\\norfab>nfcli\nnf#\n

    This will start the NorFab broker process and the Nornir worker process, instantiate the NFCLI client and drop you into an interactive command line shell:

    nf#? # (1)!\n file      File sharing service\n netbox    Netbox service\n nornir    Nornir service\n show      NorFab show commands\n exit      Exit current shell\n help      Print help message\n pwd       Print current shell path\n top       Exit to top shell\nnf#show workers # (2)!\n name             service  status  holdtime  keepalives tx/rx  alive (s)\n nornir-worker-1  nornir   alive   12.8      58 / 58           149\nnf#\nnf#nornir # (3)!\nnf[nornir]#?\n cfg     Configure devices over CLI interface\n cli     Send CLI commands to devices\n show    Show Nornir service parameters\n task    Run Nornir task\n test    Run network tests\n end     Exit application\n exit    Exit current shell\n help    Print help message\n pwd     Print current shell path\n top     Exit to top shell\nnf[nornir]#show hosts\n {\n     \"nornir-worker-1\": [\n         \"R1\",\n         \"R2\"\n     ]\n }\nnf[nornir]# end\nExiting...\n
    1. Question mark plus Enter to print command help
    2. Run show command
    3. Drop into Nornir Service command shell

    The NorFab CLI supports Tab completion, question-mark help and sub-shells; read more about the NorFab CLI and how to use it here.

    That's it

    "},{"location":"norfab_installation/","title":"Installation","text":""},{"location":"norfab_installation/#norfab-core","title":"NorFab Core","text":"

    Install NorFab core from PyPI

    pip install norfab\n
    "},{"location":"norfab_installation/#extras","title":"Extras","text":"

    Several extras are supported to tailor service dependencies to what you plan to run on a given node.

    To install everything, use the full extra:

    pip install norfab[full]\n
    "},{"location":"norfab_installation/#norfab-cli-dependencies","title":"NORFAB CLI Dependencies","text":"

    To install NorFab Interactive CLI dependencies

    pip install norfab[nfcli]\n
    "},{"location":"norfab_installation/#nornir-service-dependencies","title":"Nornir Service Dependencies","text":"

    To install Nornir service dependencies

    pip install norfab[nornir_service]\n
    "},{"location":"norfab_installation/#netbox-service-dependencies","title":"Netbox Service Dependencies","text":"

    To install Netbox service dependencies

    pip install norfab[netbox_service]\n
    "},{"location":"norfab_inventory/","title":"NorFab Inventory","text":"

    NorFab comes with a Simple Inventory Datastore (SID) hosted by the broker.

    "},{"location":"norfab_inventory/#broker-inventory","title":"Broker Inventory","text":"

    TBD

    "},{"location":"norfab_inventory/#workers-inventory","title":"Workers Inventory","text":"

    To understand how the Simple Inventory Datastore serves workers their inventory, it helps to know that each worker has a unique name that identifies it.

    With that in mind, the goal is to map inventory data to an individual worker by its name.

    For example, let's say the worker name is nornir-worker-1 and we have common.yaml and nornir-worker-1.yaml files with inventory data that we need to provide to the worker.

    To map the worker name to its inventory files we can put this in the NorFab inventory (inventory.yaml) file:

    workers:\n  nornir-*:\n    - nornir/common.yaml  \n  nornir-worker-1:\n    - nornir/nornir-worker-1.yaml\n

    Where the file structure would look like this:

    \u2514\u2500\u2500\u2500rootfolder\n    \u2502   inventory.yaml\n    \u2502\n    \u2514\u2500\u2500\u2500nornir\n            common.yaml\n            nornir-worker-1.yaml\n

    As you can see, the inventory.yaml file contains a workers section with a dictionary keyed by glob patterns to match against workers' names. Once a worker name is matched by a pattern, all items in the list underneath that pattern are loaded and recursively merged, and the process continues until all patterns are evaluated. The final output is the combined inventory data of all the matched files.

    The recursive logic of combining inventory data files is straightforward - each subsequent data file is merged into the previous one, overriding any overlapping values.
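    As an illustrative sketch of that merge behaviour (hypothetical data, not tied to the files above), overlapping scalar values are overridden by the later file while nested dictionaries are merged key by key:

    common = {"service": "nornir", "runner": {"plugin": "RetryRunner"}, "groups": {}}
    worker_specific = {"runner": {"plugin": "QueueRunner"}, "hosts": {"R1": {"platform": "cisco_ios"}}}

    # after merging worker_specific on top of common the worker receives:
    merged = {
        "service": "nornir",                  # kept from the common file
        "runner": {"plugin": "QueueRunner"},  # overridden by the worker specific file
        "groups": {},                         # kept from the common file
        "hosts": {"R1": {"platform": "cisco_ios"}},  # added by the worker specific file
    }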

    The glob pattern matching logic allows you to be as specific as required: you can map specific files to individual workers, map a single data file to multiple workers, or map multiple files to multiple workers - all combinations are supported.

    For example, say we have a group of two workers named netbox-worker-1.1 and netbox-worker-1.2 and we want to map netbox_common.yaml to both of them. In that case the NorFab inventory (inventory.yaml) file could have this content:

    workers:\n  netbox-worker-1.*:\n    - netbox/netbox_common.yaml  \n

    Where the file structure would look like this:

    \u2514\u2500\u2500\u2500rootfolder\n    \u2502   inventory.yaml\n    \u2502\n    \u2514\u2500\u2500\u2500netbox\n            netbox_common.yaml\n

    Both workers will be served the netbox_common.yaml file content as their inventory data.

    "},{"location":"norfab_inventory/#workers-inventory-parameters","title":"Workers Inventory Parameters","text":"

    Workers inventory can contain these common parameters:

    1. service - name of the service this worker belongs to
    2. broker_endpoint - Broker URL to connect to

    Sample worker base inventory:

    service: nornir\nbroker_endpoint: \"tcp://127.0.0.1:5555\"\n

    The rest of the inventory data is worker specific.

    "},{"location":"norfab_inventory/#topology-inventory","title":"Topology Inventory","text":"

    The topology section of the NorFab inventory identifies the components that need to be started on a given node.

    "},{"location":"norfab_inventory_reference/","title":"Simple Inventory","text":"

    Simple Local Inventory is an inventory plugin to load inventory data from locally stored files.

    Sample inventory file

    broker:\n  endpoint: \"tcp://127.0.0.1:5555\"\n\nworkers:\n  nornir-*:\n    - nornir/common.yaml  \n  nornir-worker-1:\n    - nornir/nornir-worker-1.yaml\n\ntopology:\n  broker: True\n  workers:\n    - nornir-worker-1\n

    where nornir/common.yaml contains

    service: nornir\nbroker_endpoint: \"tcp://127.0.0.1:5555\"\nrunner:\n  plugin: RetryRunner\n  options: \n    num_workers: 100\n    num_connectors: 10\n    connect_retry: 3\n    connect_backoff: 1000\n    connect_splay: 100\n    task_retry: 3\n    task_backoff: 1000\n    task_splay: 100\n    reconnect_on_fail: True\n    task_timeout: 600\n

    and nornir/nornir-worker-1.yaml contains

    hosts: \n  csr1000v-1:\n    hostname: sandbox-1.lab.com\n    platform: cisco_ios\n    username: developer\n    password: secretpassword\n  csr1000v-2:\n    hostname: sandbox-2.lab.com\n    platform: cisco_ios\n    username: developer\n    password: secretpassword\ngroups: {}\ndefaults: {}\n

    Whenever the inventory is queried to provide data for a worker named nornir-worker-1, Simple Inventory iterates over the workers dictionary and recursively merges the data for keys (glob patterns) that match the worker name.

    "},{"location":"norfab_inventory_reference/#norfab.core.inventory.WorkersInventory","title":"WorkersInventory(path, data)","text":"

    Class to collect and serve NorFab workers inventory data, forming it by recursively merging all data files associated with the name of the worker requesting inventory data.

    Parameters:

    Name Type Description Default path str

    OS path to top folder with workers inventory data

    required data dict

    dictionary keyed by glob patterns matching workers names and values being a list of OS paths to files with workers inventory data

    required Source code in norfab\\core\\inventory.py
    def __init__(self, path: str, data: dict) -> None:\n    \"\"\"\n    Class to collect and server NorFab workers inventory data,\n    forming it by recursively merging all data files that associated\n    with the name of worker requesting inventory data.\n\n    :param path: OS path to top folder with workers inventory data\n    :param data: dictionary keyed by glob patterns matching workers names\n        and values being a list of OS paths to files with workers\n        inventory data\n    \"\"\"\n    self.path, _ = os.path.split(path)\n    self.data = data\n
    "},{"location":"norfab_inventory_reference/#norfab.core.inventory.NorFabInventory","title":"NorFabInventory(path)","text":"

    NorFabInventory class to instantiate simple inventory.

    Parameters:

    Name Type Description Default path str

    OS path to YAML file with inventory data

    required Source code in norfab\\core\\inventory.py
    def __init__(self, path: str) -> None:\n    \"\"\"\n    NorFabInventory class to instantiate simple inventory.\n\n    :param path: OS path to YAML file with inventory data\n    \"\"\"\n    self.broker = {}\n    self.workers = {}\n    self.topology = {}\n    path = os.path.abspath(path)\n    self.load(path)\n
    "},{"location":"norfab_inventory_reference/#norfab.core.inventory.merge_recursively","title":"merge_recursively(data, merge)","text":"

    Function to recursively merge two data dictionaries.

    Parameters:

    Name Type Description Default data dict

    primary dictionary

    required merge dict

    dictionary to merge into primary overriding the content

    required Source code in norfab\\core\\inventory.py
    def merge_recursively(data: dict, merge: dict) -> None:\n    \"\"\"\n    Function to merge two dictionaries data recursively.\n\n    :param data: primary dictionary\n    :param merge: dictionary to merge into primary overriding the content\n    \"\"\"\n    assert isinstance(data, dict) and isinstance(\n        merge, dict\n    ), f\"Only supports dictionary/dictionary data merges, not {type(data)}/{type(merge)}\"\n    for k, v in merge.items():\n        if k in data:\n            # merge two lists\n            if isinstance(data[k], list) and isinstance(v, list):\n                for i in v:\n                    if i not in data[k]:\n                        data[k].append(i)\n            # recursively merge dictionaries\n            elif isinstance(data[k], dict) and isinstance(v, dict):\n                merge_recursively(data[k], v)\n            # rewrite existing value with new data\n            else:\n                data[k] = v\n        else:\n            data[k] = v\n
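    A small usage sketch of the function above, assuming it is importable from norfab.core.inventory:

    from norfab.core.inventory import merge_recursively

    data = {"runner": {"plugin": "RetryRunner"}, "tags": ["common"]}
    merge = {"runner": {"options": {"num_workers": 100}}, "tags": ["worker-1"]}

    merge_recursively(data, merge)  # merges "merge" into "data" in place
    # data == {"runner": {"plugin": "RetryRunner", "options": {"num_workers": 100}},
    #          "tags": ["common", "worker-1"]}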
    "},{"location":"norfab_nfapi_reference/","title":"NFAPI (Python API)","text":"

    Utility class to implement Python API for interfacing with NorFab.

    NorFab Python API Client initialization class

    from norfab.core.nfapi import NorFab\n\nnf = NorFab(inventory=inventory)\nnf.start(start_broker=True, workers=[\"my-worker-1\"])\nNFCLIENT = nf.client\n

    Parameters:

    Name Type Description Default inventory str

    OS path to NorFab inventory YAML file

    './inventory.yaml' log_level str

    one of supported logging levels - CRITICAL, ERROR, WARNING, INFO, DEBUG

    'WARNING' Source code in norfab\\core\\nfapi.py
    def __init__(\n    self, inventory: str = \"./inventory.yaml\", log_level: str = \"WARNING\"\n) -> None:\n    \"\"\"\n    NorFab Python API Client initialization class\n\n    ```\n    from norfab.core.nfapi import NorFab\n\n    nf = NorFab(inventory=inventory)\n    nf.start(start_broker=True, workers=[\"my-worker-1\"])\n    NFCLIENT = nf.client\n    ```\n\n    :param inventory: OS path to NorFab inventory YAML file\n    :param log_level: one or supported logging levels - `CRITICAL`, `ERROR`, `WARNING`, `INFO`, `DEBUG`\n    \"\"\"\n    self.inventory = NorFabInventory(inventory)\n    self.log_level = log_level\n    self.broker_endpoint = self.inventory.get(\"broker\", {}).get(\"endpoint\")\n    self.broker_exit_event = Event()\n    self.workers_exit_event = Event()\n    self.clients_exit_event = Event()\n
    "},{"location":"norfab_nfapi_reference/#norfab.core.nfapi.NorFab.start","title":"start(start_broker=None, workers=None)","text":"

    Main entry method to start NorFab components.

    Parameters:

    Name Type Description Default start_broker bool

    if True, starts broker process

    None workers list

    list of worker names to start processes for

    None Source code in norfab\\core\\nfapi.py
    def start(\n    self,\n    start_broker: bool = None,\n    workers: list = None,\n):\n    \"\"\"\n    Main entry method to start NorFab components.\n\n    :param start_broker: if True, starts broker process\n    :param workers: list of worker names to start processes for\n    \"\"\"\n    if workers is None:\n        workers = self.inventory.topology.get(\"workers\", [])\n    if start_broker is None:\n        start_broker = self.inventory.topology.get(\"broker\", False)\n\n    # form a list of workers to start\n    workers_to_start = set()\n    for worker_name in workers:\n        if isinstance(worker_name, dict):\n            worker_name = tuple(worker_name)[0]\n        workers_to_start.add(worker_name)\n\n    # start the broker\n    if start_broker is True:\n        self.start_broker()\n\n    # start all the workers\n    while workers_to_start != set(self.workers_processes.keys()):\n        for worker in workers:\n            # extract worker name and data/params\n            if isinstance(worker, dict):\n                worker_name = tuple(worker)[0]\n                worker_data = worker[worker_name]\n            else:\n                worker_name = worker\n                worker_data = {}\n            # verify if need to start this worker\n            if worker_name not in workers_to_start:\n                continue\n            # start worker\n            try:\n                self.start_worker(worker_name, worker_data)\n            # if failed to start remove from workers to start\n            except KeyError:\n                workers_to_start.remove(worker_name)\n                log.error(\n                    f\"'{worker_name}' - failed to start worker, no inventory data found\"\n                )\n            except FileNotFoundError as e:\n                workers_to_start.remove(worker_name)\n                log.error(\n                    f\"'{worker_name}' - failed to start worker, inventory file not found '{e}'\"\n                )\n            except Exception as e:\n                workers_to_start.remove(worker_name)\n                log.error(f\"'{worker_name}' - failed to start worker, error '{e}'\")\n\n        time.sleep(0.01)\n\n    # make the API client\n    self.make_client()\n
    "},{"location":"norfab_nfapi_reference/#norfab.core.nfapi.NorFab.destroy","title":"destroy()","text":"

    Stop NORFAB processes.

    Source code in norfab\\core\\nfapi.py
    def destroy(self) -> None:\n    \"\"\"\n    Stop NORFAB processes.\n    \"\"\"\n    # stop client\n    self.clients_exit_event.set()\n    self.client.destroy()\n    # stop workers\n    self.workers_exit_event.set()\n    while self.workers_processes:\n        _, w = self.workers_processes.popitem()\n        w[\"process\"].join()\n    # stop broker\n    self.broker_exit_event.set()\n    if self.broker:\n        self.broker.join()\n
    "},{"location":"norfab_nfapi_reference/#norfab.core.nfapi.NorFab.make_client","title":"make_client(broker_endpoint=None)","text":"

    Make an instance of NorFab client

    Parameters:

    Name Type Description Default broker_endpoint str

    (str), Broker URL to connect with

    None Source code in norfab\\core\\nfapi.py
    def make_client(self, broker_endpoint: str = None) -> NFPClient:\n    \"\"\"\n    Make an instance of NorFab client\n\n    :param broker_endpoint: (str), Broker URL to connect with\n    \"\"\"\n\n    if broker_endpoint or self.broker_endpoint:\n        client = NFPClient(\n            broker_endpoint or self.broker_endpoint,\n            \"NFPClient\",\n            self.log_level,\n            self.clients_exit_event,\n        )\n        if self.client is None:  # own the first client\n            self.client = client\n        return client\n    else:\n        log.error(\"Failed to make client, no broker endpoint defined\")\n        return None\n
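    For example, after nf.start() an additional client can be created next to the one NorFab owns - a sketch, assuming the broker endpoint is defined in the inventory:

    extra_client = nf.make_client()  # reuses the broker endpoint from the inventory
    if extra_client is None:
        print("no broker endpoint defined, client not created")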
    "},{"location":"norfab_python_api_overview/","title":"NORFAB Python API","text":"

    The NorFab Python API exists to run the automations fabric; the components that need to be started are defined in the inventory.yaml file. To start working with NorFab, import the core object and instantiate it.

    from norfab.core.nfapi import NorFab\n\nnf = NorFab(inventory=\"./inventory.yaml\")\nnf.start()\nnf.destroy()\n

    Refer to the Getting Started section on how to construct the inventory.yaml file.

    All interaction with NorFab happens via a client. On NorFab start, an instance of a local client is created automatically and can be used to submit jobs:

    import pprint\nfrom norfab.core.nfapi import NorFab\n\nnf = NorFab(inventory=\"./inventory.yaml\")\nnf.start()\n\nresult = nf.client.run_job(\n    service=\"nornir\",\n    task=\"cli\",\n    kwargs={\"commands\": [\"show version\", \"show clock\"]}\n)\n\npprint.pprint(result)\n\nnf.destroy()\n
    "},{"location":"norfab_worker_reference/","title":"Worker","text":""},{"location":"norfab_worker_reference/#norfab.core.worker--cudos","title":"CUDOS","text":"

    Inspired by Majordomo Protocol Worker API, ZeroMQ, Python version.

    Original MDP/Worker spec

    Location: http://rfc.zeromq.org/spec:7.

    Author: Min RK benjaminrk@gmail.com

    Based on Java example by Arkadiusz Orzechowski

    "},{"location":"norfab_worker_reference/#norfab.core.worker.WorkerWatchDog","title":"WorkerWatchDog(worker)","text":"

    Bases: Thread

    Class to monitor worker performance

    Source code in norfab\\core\\worker.py
    def __init__(self, worker):\n    super().__init__()\n    self.worker = worker\n    self.worker_process = psutil.Process(os.getpid())\n\n    # extract inventory attributes\n    self.watchdog_interval = worker.inventory.get(\"watchdog_interval\", 30)\n    self.memory_threshold_mbyte = worker.inventory.get(\n        \"memory_threshold_mbyte\", 1000\n    )\n    self.memory_threshold_action = worker.inventory.get(\n        \"memory_threshold_action\", \"log\"\n    )\n\n    # initiate variables\n    self.runs = 0\n    self.watchdog_tasks = []\n
    "},{"location":"norfab_worker_reference/#norfab.core.worker.WorkerWatchDog.get_ram_usage","title":"get_ram_usage()","text":"

    Return RAM usage in Mbyte

    Source code in norfab\\core\\worker.py
    def get_ram_usage(self):\n    \"\"\"Return RAM usage in Mbyte\"\"\"\n    return self.worker_process.memory_info().rss / 1024000\n
    "},{"location":"norfab_worker_reference/#norfab.core.worker.Result","title":"Result(result=None, failed=False, errors=None, task=None, messages=None)","text":"

    Result of running individual tasks.

    Attributes/Arguments:

    Parameters:

    Name Type Description Default changed

    True if the task is changing the system

    required result Any

    Result of the task execution, see task's documentation for details

    None failed bool

    Whether the execution failed or not

    False (logging.LEVEL) severity_level

    Severity level associated to the result of the execution

    required errors Optional[List[str]]

    exception thrown during the execution of the task (if any)

    None task str

    Task function name that produced the results

    None Source code in norfab\\core\\worker.py
    def __init__(\n    self,\n    result: Any = None,\n    failed: bool = False,\n    errors: Optional[List[str]] = None,\n    task: str = None,\n    messages: Optional[List[str]] = None,\n) -> None:\n    self.task = task\n    self.result = result\n    self.failed = failed\n    self.errors = errors or []\n    self.messages = messages or []\n
    "},{"location":"norfab_worker_reference/#norfab.core.worker.Result.dictionary","title":"dictionary()","text":"

    Method to serialize result as a dictionary

    Source code in norfab\\core\\worker.py
    def dictionary(self):\n    \"\"\"Method to serialize result as a dictionary\"\"\"\n    if not isinstance(self.errors, list):\n        self.errors = [self.errors]\n    if not isinstance(self.messages, list):\n        self.messages = [self.messages]\n\n    return {\n        \"task\": self.task,\n        \"failed\": self.failed,\n        \"errors\": self.errors,\n        \"result\": self.result,\n        \"messages\": self.messages,\n    }\n
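    A quick sketch of building a result and serializing it, assuming Result is importable from norfab.core.worker where the source above lives:

    from norfab.core.worker import Result

    res = Result(result={"show clock": "10:32:00"}, task="cli")
    print(res.dictionary())
    # {"task": "cli", "failed": False, "errors": [], "result": {"show clock": "10:32:00"}, "messages": []}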
    "},{"location":"norfab_worker_reference/#norfab.core.worker.NFPWorker","title":"NFPWorker(broker, service, name, exit_event, log_level='WARNING', multiplier=6, keepalive=2500)","text":"

    Parameters:

    Name Type Description Default broker str

    str, broker endpoint e.g. tcp://127.0.0.1:5555

    required service str

    str, service name

    required name str

    str, worker name

    required exit_event

    obj, threading event; if set, signals the worker to stop

    required multiplier int

    int, number of keepalives lost before considering the other party dead

    6 keepalive int

    int, keepalive interval in milliseconds

    2500 Source code in norfab\\core\\worker.py
    def __init__(\n    self,\n    broker: str,\n    service: str,\n    name: str,\n    exit_event,\n    log_level: str = \"WARNING\",\n    multiplier: int = 6,\n    keepalive: int = 2500,\n):\n    log.setLevel(log_level.upper())\n    self.log_level = log_level\n    self.broker = broker\n    self.service = service\n    self.name = name\n    self.exit_event = exit_event\n    self.broker_socket = None\n    self.socket_lock = (\n        threading.Lock()\n    )  # used for keepalives to protect socket object\n    self.base_dir = f\"__norfab__/files/worker/{self.name}/\"\n    self.base_dir_jobs = os.path.join(self.base_dir, \"jobs\")\n\n    self.ctx = zmq.Context()\n    self.poller = zmq.Poller()\n    self.reconnect_to_broker()\n\n    self.destroy_event = threading.Event()\n    self.request_thread = None\n    self.reply_thread = None\n    self.close_thread = None\n    self.recv_thread = None\n    self.event_thread = None\n\n    self.post_queue = queue.Queue(maxsize=0)\n    self.get_queue = queue.Queue(maxsize=0)\n    self.delete_queue = queue.Queue(maxsize=0)\n    self.event_queue = queue.Queue(maxsize=0)\n\n    # create queue file\n    os.makedirs(self.base_dir, exist_ok=True)\n    os.makedirs(self.base_dir_jobs, exist_ok=True)\n    self.queue_filename = os.path.join(self.base_dir_jobs, f\"{self.name}.queue.txt\")\n    if not os.path.exists(self.queue_filename):\n        with open(self.queue_filename, \"w\") as f:\n            pass\n    self.queue_done_filename = os.path.join(\n        self.base_dir_jobs, f\"{self.name}.queue.done.txt\"\n    )\n    if not os.path.exists(self.queue_done_filename):\n        with open(self.queue_done_filename, \"w\") as f:\n            pass\n\n    self.keepaliver = KeepAliver(\n        address=None,\n        socket=self.broker_socket,\n        multiplier=multiplier,\n        keepalive=keepalive,\n        exit_event=self.destroy_event,\n        service=self.service,\n        whoami=NFP.WORKER,\n        name=self.name,\n        socket_lock=self.socket_lock,\n        log_level=self.log_level,\n    )\n    self.keepaliver.start()\n    self.client = NFPClient(\n        self.broker, name=f\"{self.name}-NFPClient\", exit_event=self.exit_event\n    )\n
    "},{"location":"norfab_worker_reference/#norfab.core.worker.NFPWorker.reconnect_to_broker","title":"reconnect_to_broker()","text":"

    Connect or reconnect to broker

    Source code in norfab\\core\\worker.py
    def reconnect_to_broker(self):\n    \"\"\"Connect or reconnect to broker\"\"\"\n    if self.broker_socket:\n        self.send_to_broker(NFP.DISCONNECT)\n        self.poller.unregister(self.broker_socket)\n        self.broker_socket.close()\n\n    self.broker_socket = self.ctx.socket(zmq.DEALER)\n    self.broker_socket.setsockopt_unicode(zmq.IDENTITY, self.name, \"utf8\")\n    self.broker_socket.linger = 0\n    self.broker_socket.connect(self.broker)\n    self.poller.register(self.broker_socket, zmq.POLLIN)\n\n    # Register service with broker\n    self.send_to_broker(NFP.READY)\n\n    log.info(\n        f\"{self.name} - registered to broker at '{self.broker}', service '{self.service}'\"\n    )\n
    "},{"location":"norfab_worker_reference/#norfab.core.worker.NFPWorker.send_to_broker","title":"send_to_broker(command, msg=None)","text":"

    Send message to broker.

    If no msg is provided, creates one internally

    Source code in norfab\\core\\worker.py
    def send_to_broker(self, command, msg: list = None):\n    \"\"\"Send message to broker.\n\n    If no msg is provided, creates one internally\n    \"\"\"\n    if command == NFP.READY:\n        msg = [b\"\", NFP.WORKER, NFP.READY, self.service]\n    elif command == NFP.DISCONNECT:\n        msg = [b\"\", NFP.WORKER, NFP.DISCONNECT, self.service]\n    elif command == NFP.RESPONSE:\n        msg = [b\"\", NFP.WORKER, NFP.RESPONSE] + msg\n    elif command == NFP.EVENT:\n        msg = [b\"\", NFP.WORKER, NFP.EVENT] + msg\n    else:\n        log.error(\n            f\"{self.name} - cannot send '{command}' to broker, command unsupported\"\n        )\n        return\n\n    log.debug(f\"{self.name} - sending '{msg}'\")\n\n    with self.socket_lock:\n        self.broker_socket.send_multipart(msg)\n
    "},{"location":"norfab_worker_reference/#norfab.core.worker.NFPWorker.load_inventory","title":"load_inventory()","text":"

    Function to load inventory from broker for this worker name.

    Source code in norfab\\core\\worker.py
    def load_inventory(self):\n    \"\"\"\n    Function to load inventory from broker for this worker name.\n    \"\"\"\n    inventory_data = self.client.get(\n        \"sid.service.broker\", \"get_inventory\", kwargs={\"name\": self.name}\n    )\n\n    log.debug(f\"{self.name} - worker received invenotry data {inventory_data}\")\n\n    if inventory_data[\"results\"]:\n        return json.loads(inventory_data[\"results\"])\n    else:\n        return {}\n
    "},{"location":"norfab_worker_reference/#norfab.core.worker.NFPWorker.fetch_file","title":"fetch_file(url, raise_on_fail=False, read=True)","text":"

    Function to download file from broker File Sharing Service

    Parameters:

    Name Type Description Default url str

    file location string in nf://<filepath> format

    required raise_on_fail bool

    raise FileNotFoundError if download fails

    False read bool

    if True, returns file content; otherwise returns OS path to the saved file

    True Source code in norfab\\core\\worker.py
    def fetch_file(\n    self, url: str, raise_on_fail: bool = False, read: bool = True\n) -> str:\n    \"\"\"\n    Function to download file from broker File Sharing Service\n\n    :param url: file location string in ``nf://<filepath>`` format\n    :param raise_on_fail: raise FIleNotFoundError if download fails\n    :param read: if True returns file content, return OS path to saved file otherwise\n    \"\"\"\n    status, file_content = self.client.fetch_file(url=url, read=read)\n    msg = f\"{self.name} - worker '{url}' fetch file failed with status '{status}'\"\n\n    if status == \"200\":\n        return file_content\n    elif raise_on_fail is True:\n        raise FileNotFoundError(msg)\n    else:\n        log.error(msg)\n        return None\n
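    Inside a worker task method this is typically called along these lines (a sketch; the nf:// path is hypothetical):

    # read=True (default) returns the file content as text;
    # raise_on_fail=True raises FileNotFoundError if the download fails
    config_text = self.fetch_file("nf://configs/router1.cfg", raise_on_fail=True)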
    "},{"location":"norfab_worker_reference/#norfab.core.worker.NFPWorker.fetch_jinja2","title":"fetch_jinja2(url)","text":"

    Helper function to recursively download Jinja2 template together with other templates referenced using \"include\" statements

    Parameters:

    Name Type Description Default url str

    nf://file/path like URL to download file

    required Source code in norfab\\core\\worker.py
    def fetch_jinja2(self, url: str) -> str:\n    \"\"\"\n    Helper function to recursively download Jinja2 template together with\n    other templates referenced using \"include\" statements\n\n    :param url: ``nf://file/path`` like URL to download file\n    \"\"\"\n    filepath = self.fetch_file(url, read=False)\n    if filepath is None:\n        msg = f\"{self.name} - file download failed '{url}'\"\n        raise FileNotFoundError(msg)\n\n    # download Jinja2 template \"include\"-ed files\n    content = self.fetch_file(url, read=True)\n    j2env = Environment(loader=\"BaseLoader\")\n    try:\n        parsed_content = j2env.parse(content)\n    except Exception as e:\n        msg = f\"{self.name} - Jinja2 template parsing failed '{url}', error: '{e}'\"\n        raise Exception(msg)\n\n    # run recursion on include statements\n    for node in parsed_content.find_all(Include):\n        include_file = node.template.value\n        base_path = os.path.split(url)[0]\n        self.fetch_jinja2(os.path.join(base_path, include_file))\n\n    return filepath\n
    "},{"location":"norfab_worker_reference/#norfab.core.worker.request_filename","title":"request_filename(suuid, base_dir_jobs)","text":"

    Returns freshly allocated request filename for given UUID str

    Source code in norfab\\core\\worker.py
    def request_filename(suuid: Union[str, bytes], base_dir_jobs: str):\n    \"\"\"Returns freshly allocated request filename for given UUID str\"\"\"\n    suuid = suuid.decode(\"utf-8\") if isinstance(suuid, bytes) else suuid\n    return os.path.join(base_dir_jobs, f\"{suuid}.req\")\n
    "},{"location":"norfab_worker_reference/#norfab.core.worker.reply_filename","title":"reply_filename(suuid, base_dir_jobs)","text":"

    Returns freshly allocated reply filename for given UUID str

    Source code in norfab\\core\\worker.py
    def reply_filename(suuid: Union[str, bytes], base_dir_jobs: str):\n    \"\"\"Returns freshly allocated reply filename for given UUID str\"\"\"\n    suuid = suuid.decode(\"utf-8\") if isinstance(suuid, bytes) else suuid\n    return os.path.join(base_dir_jobs, f\"{suuid}.rep\")\n
    "},{"location":"norfab_worker_reference/#norfab.core.worker.recv","title":"recv(worker, destroy_event)","text":"

    Thread to process messages received from the broker.

    Source code in norfab\\core\\worker.py
    def recv(worker, destroy_event):\n    \"\"\"Thread to process receive messages from broker.\"\"\"\n    while not destroy_event.is_set():\n        # Poll socket for messages every second\n        try:\n            items = worker.poller.poll(1000)\n        except KeyboardInterrupt:\n            break  # Interrupted\n        if items:\n            msg = worker.broker_socket.recv_multipart()\n            log.debug(f\"{worker.name} - received '{msg}'\")\n            empty = msg.pop(0)\n            header = msg.pop(0)\n            command = msg.pop(0)\n\n            if command == NFP.POST:\n                worker.post_queue.put(msg)\n            elif command == NFP.DELETE:\n                worker.delete_queue.put(msg)\n            elif command == NFP.GET:\n                worker.get_queue.put(msg)\n            elif command == NFP.KEEPALIVE:\n                worker.keepaliver.received_heartbeat([header] + msg)\n            elif command == NFP.DISCONNECT:\n                worker.reconnect_to_broker()\n            else:\n                log.debug(\n                    f\"{worker.name} - invalid input, header '{header}', command '{command}', message '{msg}'\"\n                )\n\n        if not worker.keepaliver.is_alive():\n            log.warning(f\"{worker.name} - '{worker.broker}' broker keepalive expired\")\n            worker.reconnect_to_broker()\n
    "},{"location":"nornir_service/","title":"Nornir Service","text":"

    Nornir Service is built on the Nornir library, a well adopted open-source tool for automating network operations.

    With each Nornir worker capable of handling multiple devices simultaneously, Nornir Service offers high scalability, allowing efficient management of large device fleets. By optimizing compute resources such as CPU, RAM, and storage, it delivers cost-effective performance.

    Additionally, Nornir Service supports various interfaces and libraries for seamless integration. For instance, the cli task can interact with devices via the Command Line Interface (CLI) using popular libraries like Netmiko, Scrapli, and NAPALM, providing flexibility for diverse network environments.

    "},{"location":"nornir_worker_api_reference/","title":"Nornir Worker","text":""},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker--nornir-worker-inventory-reference","title":"Nornir Worker Inventory Reference","text":"
    • watchdog_interval - watchdog run interval in seconds, default is 30
    • connections_idle_timeout - watchdog connection idle timeout, default is None - no timeout, connections are always kept alive; if set to 0, connections are disconnected immediately after a task completes; if set to a positive number, a connection is disconnected after not being used for longer than connections_idle_timeout seconds (see the sketch after this list)
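    A hedged sketch of what the watchdog reads from the worker inventory, expressed as the equivalent Python dictionary:

    worker_inventory = {
        "watchdog_interval": 30,          # run watchdog tasks every 30 seconds
        "connections_idle_timeout": 600,  # close connections idle for more than 10 minutes
    }
    # the watchdog reads these values via worker.inventory.get(...) in the __init__ methods shown in this reference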
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.WatchDog","title":"WatchDog(worker)","text":"

    Bases: WorkerWatchDog

    Class to monitor Nornir worker performance

    Source code in norfab\\workers\\nornir_worker.py
    def __init__(self, worker):\n    super().__init__(worker)\n    self.worker = worker\n    self.connections_idle_timeout = worker.inventory.get(\n        \"connections_idle_timeout\", None\n    )\n    self.connections_data = {}  # store connections use timestamps\n    self.started_at = time.time()\n\n    # stats attributes\n    self.idle_connections_cleaned = 0\n    self.dead_connections_cleaned = 0\n\n    # list of tasks for watchdog to run in given order\n    self.watchdog_tasks = [\n        self.connections_clean,\n        self.connections_keepalive,\n    ]\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.WatchDog.connections_update","title":"connections_update(nr, plugin)","text":"

    Function to update connection use timestamps for each host

    Parameters:

    Name Type Description Default nr

    Nornir object

    required plugin str

    connection plugin name

    required Source code in norfab\\workers\\nornir_worker.py
    def connections_update(self, nr, plugin: str) -> None:\n    \"\"\"\n    Function to update connection use timestamps for each host\n\n    :param nr: Nornir object\n    :param plugin: connection plugin name\n    \"\"\"\n    conn_stats = {\n        \"last_use\": None,\n        \"last_keepealive\": None,\n        \"keepalive_count\": 0,\n    }\n    for host_name in nr.inventory.hosts:\n        self.connections_data.setdefault(host_name, {})\n        self.connections_data[host_name].setdefault(plugin, conn_stats.copy())\n        self.connections_data[host_name][plugin][\"last_use\"] = time.ctime()\n    log.info(\n        f\"{self.worker.name} - updated connections use timestamps for '{plugin}'\"\n    )\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.WatchDog.connections_clean","title":"connections_clean()","text":"

    Check if need to tear down connections that are idle - not being used over connections_idle_timeout

    Source code in norfab\\workers\\nornir_worker.py
    def connections_clean(self):\n    \"\"\"\n    Check if need to tear down connections that are idle -\n    not being used over connections_idle_timeout\n    \"\"\"\n    # dictionary keyed by plugin name and value as a list of hosts\n    disconnect = {}\n    if not self.worker.connections_lock.acquire(blocking=False):\n        return\n    try:\n        # if idle timeout not set, connections don't age out\n        if self.connections_idle_timeout is None:\n            disconnect = {}\n        # disconnect all connections for all hosts\n        elif self.connections_idle_timeout == 0:\n            disconnect = {\"all\": list(self.connections_data.keys())}\n        # only disconnect aged/idle connections\n        elif self.connections_idle_timeout > 0:\n            for host_name, plugins in self.connections_data.items():\n                for plugin, conn_data in plugins.items():\n                    last_use = time.mktime(time.strptime(conn_data[\"last_use\"]))\n                    age = time.time() - last_use\n                    if age > self.connections_idle_timeout:\n                        disconnect.setdefault(plugin, [])\n                        disconnect[plugin].append(host_name)\n        # run task to disconnect connections for aged hosts\n        for plugin, hosts in disconnect.items():\n            if not hosts:\n                continue\n            aged_hosts = FFun(self.worker.nr, FL=hosts)\n            aged_hosts.run(task=nr_connections, call=\"close\", conn_name=plugin)\n            log.debug(\n                f\"{self.worker.name} watchdog, disconnected '{plugin}' \"\n                f\"connections for '{', '.join(hosts)}'\"\n            )\n            self.idle_connections_cleaned += len(hosts)\n            # wipe out connections data if all connection closed\n            if plugin == \"all\":\n                self.connections_data = {}\n                break\n            # remove disconnected plugin from host's connections_data\n            for host in hosts:\n                self.connections_data[host].pop(plugin)\n                if not self.connections_data[host]:\n                    self.connections_data.pop(host)\n    except Exception as e:\n        msg = f\"{self.worker.name} - watchdog failed to close idle connections, error: {e}\"\n        log.error(msg)\n    finally:\n        self.worker.connections_lock.release()\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.WatchDog.connections_keepalive","title":"connections_keepalive()","text":"

    Keepalive connections and clean up dead connections if any

    Source code in norfab\\workers\\nornir_worker.py
    def connections_keepalive(self):\n    \"\"\"Keepalive connections and clean up dead connections if any\"\"\"\n    if self.connections_idle_timeout == 0:  # do not keepalive if idle is 0\n        return\n    if not self.worker.connections_lock.acquire(blocking=False):\n        return\n    try:\n        log.debug(f\"{self.worker.name} - watchdog running connections keepalive\")\n        stats = HostsKeepalive(self.worker.nr)\n        self.dead_connections_cleaned += stats[\"dead_connections_cleaned\"]\n        # update connections statistics\n        for plugins in self.connections_data.values():\n            for plugin in plugins.values():\n                plugin[\"last_keepealive\"] = time.ctime()\n                plugin[\"keepalive_count\"] += 1\n    except Exception as e:\n        msg = f\"{self.worker.name} - watchdog HostsKeepalive check error: {e}\"\n        log.error(msg)\n    finally:\n        self.worker.connections_lock.release()\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker","title":"NornirWorker(broker, service, worker_name, exit_event=None, init_done_event=None, log_level='WARNING')","text":"

    Bases: NFPWorker

    Parameters:

    Name Type Description Default broker str

    broker URL to connect to

    required service str

    name of the service this worker belongs to

    required worker_name str

    name of this worker

    required exit_event

    if set, worker needs to stop/exit

    None init_done_event

    event to set when worker done initializing

    None log_level str

    logging level of this worker

    'WARNING' Source code in norfab\\workers\\nornir_worker.py
    def __init__(\n    self,\n    broker: str,\n    service: str,\n    worker_name: str,\n    exit_event=None,\n    init_done_event=None,\n    log_level: str = \"WARNING\",\n):\n    super().__init__(broker, service, worker_name, exit_event, log_level)\n    self.init_done_event = init_done_event\n    self.tf_base_path = os.path.join(self.base_dir, \"tf\")\n\n    # misc attributes\n    self.connections_lock = Lock()\n\n    # get inventory from broker\n    self.inventory = self.load_inventory()\n\n    # pull Nornir inventory from Netbox\n    self._pull_netbox_inventory()\n\n    # initiate Nornir\n    self._init_nornir()\n\n    # initiate watchdog\n    self.watchdog = WatchDog(self)\n    self.watchdog.start()\n\n    self.init_done_event.set()\n    log.info(f\"{self.name} - Started\")\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.render_jinja2_templates","title":"render_jinja2_templates(templates, context, filters=None)","text":"

    helper function to render a list of Jinja2 templates

    Parameters:

    Name Type Description Default templates list[str]

    list of template strings to render

    required context dict

    Jinja2 context dictionary

    required filter

    custom Jinja2 filters

    required

    Returns:

    Type Description list[str]

    list of rendered strings

    Source code in norfab\\workers\\nornir_worker.py
    def render_jinja2_templates(\n    self, templates: list[str], context: dict, filters: dict = None\n) -> list[str]:\n    \"\"\"\n    helper function to render a list of Jinja2 templates\n\n    :param templates: list of template strings to render\n    :param context: Jinja2 context dictionary\n    :param filter: custom Jinja2 filters\n    :returns: list of rendered strings\n    \"\"\"\n    rendered = []\n    filters = filters or {}\n    for template in templates:\n        if template.startswith(\"nf://\"):\n            filepath = self.fetch_jinja2(template)\n            searchpath, filename = os.path.split(filepath)\n            j2env = Environment(loader=FileSystemLoader(searchpath))\n            renderer = j2env.get_template(filename)\n        else:\n            j2env = Environment(loader=\"BaseLoader\")\n            renderer = j2env.from_string(template)\n        j2env.filters.update(filters)  # add custom filters\n        rendered.append(renderer.render(**context))\n\n    return rendered\n
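    A usage sketch from within the Nornir worker, using a hypothetical inline template string and context:

    rendered = self.render_jinja2_templates(
        templates=["hostname {{ host_name }}"],
        context={"host_name": "R1"},
    )
    # rendered == ["hostname R1"]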
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.load_job_data","title":"load_job_data(job_data)","text":"

    Helper function to download job data and load it using YAML

    Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| job_data | str | URL to the job data file; non-URL input is returned unchanged | required |

Source code in norfab\\workers\\nornir_worker.py
    def load_job_data(self, job_data: str):\n    \"\"\"\n    Helper function to download job data and load it using YAML\n\n    :param job_data: URL to job data\n    \"\"\"\n    if self.is_url(job_data):\n        job_data = self.fetch_file(job_data)\n        if job_data is None:\n            msg = f\"{self.name} - '{job_data}' job data file download failed\"\n            raise FileNotFoundError(msg)\n        job_data = yaml.safe_load(job_data)\n\n    return job_data\n
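A short hypothetical sketch, assuming worker is an initialized NornirWorker and that the nf:// path exists on the file service:

```python
# Hypothetical sketch - the nf:// path is an assumption.
data = worker.load_job_data("nf://data/job_data.yaml")     # downloaded, then YAML-loaded
inline = worker.load_job_data({"ntp_server": "10.0.0.1"})  # non-URL input returned as is
```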
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.get_nornir_hosts","title":"get_nornir_hosts(details=False, **kwargs)","text":"

    Produce a list of hosts managed by this worker

    Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| details | bool | if True, return platform, hostname, port, groups and username for each host | False |
| kwargs | dict | dictionary of nornir-salt Fx filters | {} |

Source code in norfab\\workers\\nornir_worker.py
    def get_nornir_hosts(self, details: bool = False, **kwargs: dict) -> list:\n    \"\"\"\n    Produce a list of hosts managed by this worker\n\n    :param kwargs: dictionary of nornir-salt Fx filters\n    \"\"\"\n    filters = {k: kwargs.pop(k) for k in list(kwargs.keys()) if k in FFun_functions}\n    filtered_nornir = FFun(self.nr, **filters)\n    if details:\n        return Result(\n            result={\n                host_name: {\n                    \"platform\": str(host.platform),\n                    \"hostname\": str(host.hostname),\n                    \"port\": str(host.port),\n                    \"groups\": [str(g) for g in host.groups],\n                    \"username\": str(host.username),\n                }\n                for host_name, host in filtered_nornir.inventory.hosts.items()\n            }\n        )\n    else:\n        return Result(result=list(filtered_nornir.inventory.hosts))\n
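A hypothetical sketch, assuming worker is an initialized NornirWorker and that the inventory contains hosts matching the FB glob filter used below:

```python
# Hypothetical sketch - the FB glob value depends on your inventory host names.
brief = worker.get_nornir_hosts(FB="ceos*")
print(brief.result)     # e.g. ["ceos1", "ceos2"]

detailed = worker.get_nornir_hosts(FB="ceos*", details=True)
print(detailed.result)  # per-host platform, hostname, port, groups and username
```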
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.get_nornir_inventory","title":"get_nornir_inventory(**kwargs)","text":"

    Retrieve running Nornir inventory for requested hosts

    Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| kwargs | dict | dictionary of nornir-salt Fx filters | {} |

Source code in norfab\\workers\\nornir_worker.py
    def get_nornir_inventory(self, **kwargs: dict) -> dict:\n    \"\"\"\n    Retrieve running Nornir inventory for requested hosts\n\n    :param kwargs: dictionary of nornir-salt Fx filters\n    \"\"\"\n    filters = {k: kwargs.pop(k) for k in list(kwargs.keys()) if k in FFun_functions}\n    filtered_nornir = FFun(self.nr, **filters)\n    return Result(\n        result=filtered_nornir.inventory.dict(), task=\"get_nornir_inventory\"\n    )\n
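A hypothetical sketch, with the same assumptions as for get_nornir_hosts above; the FL host names are illustrative:

```python
# Hypothetical sketch - the FL host list is an assumption about inventory host names.
inventory = worker.get_nornir_inventory(FL=["ceos1", "ceos2"])
print(list(inventory.result["hosts"]))  # hosts, groups and defaults sections are returned
```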
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.get_nornir_version","title":"get_nornir_version()","text":"

Produce a version report of the Python packages installed on the worker's machine.

    Source code in norfab\\workers\\nornir_worker.py
    def get_nornir_version(self):\n    \"\"\"\n    Produce Python packages version report\n    \"\"\"\n    libs = {\n        \"scrapli\": \"\",\n        \"scrapli-netconf\": \"\",\n        \"scrapli-community\": \"\",\n        \"paramiko\": \"\",\n        \"netmiko\": \"\",\n        \"napalm\": \"\",\n        \"nornir\": \"\",\n        \"ncclient\": \"\",\n        \"nornir-netmiko\": \"\",\n        \"nornir-napalm\": \"\",\n        \"nornir-scrapli\": \"\",\n        \"nornir-utils\": \"\",\n        \"tabulate\": \"\",\n        \"xmltodict\": \"\",\n        \"puresnmp\": \"\",\n        \"pygnmi\": \"\",\n        \"pyyaml\": \"\",\n        \"jmespath\": \"\",\n        \"jinja2\": \"\",\n        \"ttp\": \"\",\n        \"nornir-salt\": \"\",\n        \"lxml\": \"\",\n        \"ttp-templates\": \"\",\n        \"ntc-templates\": \"\",\n        \"cerberus\": \"\",\n        \"pydantic\": \"\",\n        \"requests\": \"\",\n        \"textfsm\": \"\",\n        \"N2G\": \"\",\n        \"dnspython\": \"\",\n        \"pythonping\": \"\",\n        \"python\": sys.version.split(\" \")[0],\n        \"platform\": sys.platform,\n    }\n    # get version of packages installed\n    for pkg in libs.keys():\n        try:\n            libs[pkg] = importlib.metadata.version(pkg)\n        except importlib.metadata.PackageNotFoundError:\n            pass\n\n    return Result(result=libs)\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.task","title":"task(plugin, **kwargs)","text":"

Function to invoke any of the supported Nornir task plugins. This function performs a dynamic import of the requested plugin function and executes nr.run using the supplied args and kwargs.

The plugin argument can also refer to a file to fetch from the file service. The file must contain a function named task that accepts a Nornir task object as its first positional argument, for example:

    # define connection name for RetryRunner to properly detect it\nCONNECTION_NAME = \"netmiko\"\n\n# create task function\ndef task(nornir_task_object, *args, **kwargs):\n    pass\n

    CONNECTION_NAME

CONNECTION_NAME must be defined within the custom task function file if RetryRunner is in use; otherwise the connection retry logic is skipped and connections to all hosts are initiated simultaneously, up to the number of num_workers.

    Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| plugin | str | path.to.plugin.task_fun to import or nf://path/to/task.py to download custom task | required |
| kwargs | dict | arguments to use with specified task plugin | {} |

Source code in norfab\\workers\\nornir_worker.py
    def task(self, plugin: str, **kwargs) -> Result:\n    \"\"\"\n    Function to invoke any of supported Nornir task plugins. This function\n    performs dynamic import of requested plugin function and executes\n    ``nr.run`` using supplied args and kwargs\n\n    ``plugin`` attribute can refer to a file to fetch from file service. File must contain\n    function named ``task`` accepting Nornir task object as a first positional\n    argument, for example:\n\n    ```python\n    # define connection name for RetryRunner to properly detect it\n    CONNECTION_NAME = \"netmiko\"\n\n    # create task function\n    def task(nornir_task_object, *args, **kwargs):\n        pass\n    ```\n\n    !!! note \"CONNECTION_NAME\"\n\n        ``CONNECTION_NAME`` must be defined within custom task function file if\n        RetryRunner in use, otherwise connection retry logic skipped and connections\n        to all hosts initiated simultaneously up to the number of ``num_workers``.\n\n    :param plugin: (str) ``path.to.plugin.task_fun`` to import or ``nf://path/to/task.py``\n        to download custom task\n    :param kwargs: (dict) arguments to use with specified task plugin\n    \"\"\"\n    # extract attributes\n    add_details = kwargs.pop(\"add_details\", False)  # ResultSerializer\n    to_dict = kwargs.pop(\"to_dict\", True)  # ResultSerializer\n    filters = {k: kwargs.pop(k) for k in list(kwargs.keys()) if k in FFun_functions}\n    ret = Result(task=f\"{self.name}:task\", result={} if to_dict else [])\n\n    # download task from broker and load it\n    if plugin.startswith(\"nf://\"):\n        function_text = self.fetch_file(plugin)\n        if function_text is None:\n            raise FileNotFoundError(\n                f\"{self.name} - '{plugin}' task plugin download failed\"\n            )\n\n        # load task function running exec\n        globals_dict = {}\n        exec(function_text, globals_dict, globals_dict)\n        task_function = globals_dict[\"task\"]\n    # import task function\n    else:\n        # below same as \"from nornir.plugins.tasks import task_fun as task_function\"\n        task_fun = plugin.split(\".\")[-1]\n        module = __import__(plugin, fromlist=[\"\"])\n        task_function = getattr(module, task_fun)\n\n    self.nr.data.reset_failed_hosts()  # reset failed hosts\n    filtered_nornir = FFun(self.nr, **filters)  # filter hosts\n\n    # check if no hosts matched\n    if not filtered_nornir.inventory.hosts:\n        msg = (\n            f\"{self.name} - nothing to do, no hosts matched by filters '{filters}'\"\n        )\n        log.debug(msg)\n        ret.messages.append(msg)\n        return ret\n\n    nr = self._add_processors(filtered_nornir, kwargs)  # add processors\n\n    # run task\n    log.debug(f\"{self.name} - running Nornir task '{plugin}', kwargs '{kwargs}'\")\n    with self.connections_lock:\n        result = nr.run(task=task_function, **kwargs)\n    ret.result = ResultSerializer(result, to_dict=to_dict, add_details=add_details)\n\n    self.watchdog.connections_clean()\n\n    return ret\n
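A hypothetical sketch of both invocation styles, assuming worker is an initialized NornirWorker; the import path, the nf:// path and the FB filter value are illustrative:

```python
# Hypothetical sketch - plugin paths and the FB filter value are assumptions.
# Import style: "path.to.plugin.task_fun", last component is the task function name.
result = worker.task(
    plugin="nornir_napalm.plugins.tasks.napalm_get",
    getters=["get_facts"],
    FB="ceos*",
    add_details=True,
)

# File service style: download a custom file that defines a task() function.
result = worker.task(plugin="nf://tasks/my_custom_task.py", FB="ceos*")
```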
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.cli","title":"cli(commands=None, plugin='netmiko', cli_dry_run=False, run_ttp=None, job_data=None, to_dict=True, add_details=False, **kwargs)","text":"

Function to collect show command output from devices using the Command Line Interface (CLI).

    Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| commands | list | list of commands to send to devices | None |
| plugin | str | plugin name to use - netmiko, scrapli or napalm | 'netmiko' |
| cli_dry_run | bool | do not send commands to devices, just return them | False |
| job_data | str | URL to YAML file with data, or dictionary/list of data, to pass on to the Jinja2 rendering context | None |
| add_details | bool | if True, add task execution details to the results | False |
| to_dict | bool | default is True - produces dictionary results; if False, produces a results list | True |
| run_ttp | str | TTP template to run | None |

Source code in norfab\\workers\\nornir_worker.py
    def cli(\n    self,\n    commands: list = None,\n    plugin: str = \"netmiko\",\n    cli_dry_run: bool = False,\n    run_ttp: str = None,\n    job_data: str = None,\n    to_dict: bool = True,\n    add_details: bool = False,\n    **kwargs,\n) -> dict:\n    \"\"\"\n    Function to collect show commands output from devices using\n    Command Line Interface (CLI)\n\n    :param commands: list of commands to send to devices\n    :param plugin: plugin name to use - ``netmiko``, ``scrapli``, ``napalm``\n    :param cli_dry_run: do not send commands to devices just return them\n    :param job_data: URL to YAML file with data or dictionary/list of data\n        to pass on to Jinja2 rendering context\n    :param add_details: if True will add task execution details to the results\n    :param to_dict: default is True - produces dictionary results, if False\n        will produce results list\n    :param run_ttp: TTP Template to run\n    \"\"\"\n    job_data = job_data or {}\n    filters = {k: kwargs.pop(k) for k in list(kwargs.keys()) if k in FFun_functions}\n    downloaded_cmds = []\n    timeout = self.current_job[\"timeout\"] * 0.9\n    ret = Result(task=f\"{self.name}:cli\", result={} if to_dict else [])\n\n    # decide on what send commands task plugin to use\n    if plugin == \"netmiko\":\n        task_plugin = netmiko_send_commands\n        if kwargs.get(\"use_ps\"):\n            kwargs.setdefault(\"timeout\", timeout)\n        else:\n            kwargs.setdefault(\"read_timeout\", timeout)\n    elif plugin == \"scrapli\":\n        task_plugin = scrapli_send_commands\n        kwargs.setdefault(\"timeout_ops\", timeout)\n    elif plugin == \"napalm\":\n        task_plugin = napalm_send_commands\n    else:\n        raise UnsupportedPluginError(f\"Plugin '{plugin}' not supported\")\n\n    self.nr.data.reset_failed_hosts()  # reset failed hosts\n    filtered_nornir = FFun(self.nr, **filters)  # filter hosts\n\n    # check if no hosts matched\n    if not filtered_nornir.inventory.hosts:\n        msg = (\n            f\"{self.name} - nothing to do, no hosts matched by filters '{filters}'\"\n        )\n        log.debug(msg)\n        ret.messages.append(msg)\n        return ret\n\n    # download TTP template\n    if self.is_url(run_ttp):\n        downloaded = self.fetch_file(run_ttp)\n        kwargs[\"run_ttp\"] = downloaded\n        if downloaded is None:\n            msg = f\"{self.name} - TTP template download failed '{run_ttp}'\"\n            raise FileNotFoundError(msg)\n    # use TTP template as is - inline template or ttp://xyz path\n    elif run_ttp:\n        kwargs[\"run_ttp\"] = run_ttp\n\n    # download job data\n    job_data = self.load_job_data(job_data)\n\n    nr = self._add_processors(filtered_nornir, kwargs)  # add processors\n\n    # render commands using Jinja2 on a per-host basis\n    if commands:\n        commands = commands if isinstance(commands, list) else [commands]\n        for host in nr.inventory.hosts.values():\n            rendered = self.render_jinja2_templates(\n                templates=commands,\n                context={\n                    \"host\": host,\n                    \"norfab\": self.client,\n                    \"nornir\": self,\n                    \"job_data\": job_data,\n                },\n            )\n            host.data[\"__task__\"] = {\"commands\": rendered}\n\n    # run task\n    log.debug(\n        f\"{self.name} - running cli commands '{commands}', kwargs '{kwargs}', is cli dry run - '{cli_dry_run}'\"\n    )\n    if cli_dry_run is True:\n    
    result = nr.run(\n            task=nr_test, use_task_data=\"commands\", name=\"cli_dry_run\", **kwargs\n        )\n    else:\n        with self.connections_lock:\n            result = nr.run(task=task_plugin, **kwargs)\n\n    ret.result = ResultSerializer(result, to_dict=to_dict, add_details=add_details)\n\n    # remove __task__ data\n    for host_name, host_object in nr.inventory.hosts.items():\n        _ = host_object.data.pop(\"__task__\", None)\n\n    self.watchdog.connections_update(nr, plugin)\n    self.watchdog.connections_clean()\n\n    return ret\n
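A hypothetical sketch, assuming worker is an initialized NornirWorker; in normal operation this task is invoked through a NorFab client job request, so the direct call below only illustrates the parameters. Commands, filter and job data values are made up:

```python
# Hypothetical sketch - commands, FB value and job_data content are assumptions.
output = worker.cli(
    commands=["show clock", "ping {{ job_data.probe_target }}"],  # Jinja2-rendered per host
    plugin="netmiko",
    FB="ceos*",
    job_data={"probe_target": "10.0.0.1"},
    cli_dry_run=False,
    add_details=False,
)
print(output.result)  # {"<host>": {"show clock": "...", ...}, ...} when to_dict=True
```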
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.nb_get_next_ip","title":"nb_get_next_ip(*args, **kwargs)","text":"

Method to query the next available IP address from the Netbox service.

    Source code in norfab\\workers\\nornir_worker.py
    def nb_get_next_ip(self, *args, **kwargs):\n    \"\"\"Method to query next available IP address from Netbox service\"\"\"\n    reply = self.client.run_job(\n        \"netbox\",\n        \"get_next_ip\",\n        args=args,\n        kwargs=kwargs,\n        workers=\"any\",\n        timeout=30,\n    )\n    # reply is a dict of {worker_name: results_dict}\n    result = list(reply.values())[0]\n\n    return result[\"result\"]\n
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.cfg","title":"cfg(config, plugin='netmiko', cfg_dry_run=False, to_dict=True, add_details=False, job_data=None, **kwargs)","text":"

Function to send configuration commands to devices using the Command Line Interface (CLI).

    Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| config | list | list of configuration commands to send to devices | required |
| plugin | str | plugin name to use - netmiko, scrapli or napalm | 'netmiko' |
| cfg_dry_run | bool | do not send commands to devices, just return them | False |
| job_data | str | URL to YAML file with data, or dictionary/list of data, to pass on to the Jinja2 rendering context | None |
| add_details | bool | if True, add task execution details to the results | False |
| to_dict | bool | default is True - produces dictionary results; if False, produces a results list | True |

Source code in norfab\\workers\\nornir_worker.py
    def cfg(\n    self,\n    config: list,\n    plugin: str = \"netmiko\",\n    cfg_dry_run: bool = False,\n    to_dict: bool = True,\n    add_details: bool = False,\n    job_data: str = None,\n    **kwargs,\n) -> dict:\n    \"\"\"\n    Function to send configuration commands to devices using\n    Command Line Interface (CLI)\n\n    :param config: list of commands to send to devices\n    :param plugin: plugin name to use - ``netmiko``, ``scrapli``, ``napalm``\n    :param cfg_dry_run: do not send commands to devices just return them\n    :param job_data: URL to YAML file with data or dictionary/list of data\n        to pass on to Jinja2 rendering context\n    :param add_details: if True will add task execution details to the results\n    :param to_dict: default is True - produces dictionary results, if False\n        will produce results list\n    \"\"\"\n    downloaded_cfg = []\n    config = config if isinstance(config, list) else [config]\n    filters = {k: kwargs.pop(k) for k in list(kwargs.keys()) if k in FFun_functions}\n    ret = Result(task=f\"{self.name}:cfg\", result={} if to_dict else [])\n    timeout = self.current_job[\"timeout\"]\n\n    # decide on what send commands task plugin to use\n    if plugin == \"netmiko\":\n        task_plugin = netmiko_send_config\n    elif plugin == \"scrapli\":\n        task_plugin = scrapli_send_config\n    elif plugin == \"napalm\":\n        task_plugin = napalm_configure\n    else:\n        raise UnsupportedPluginError(f\"Plugin '{plugin}' not supported\")\n\n    self.nr.data.reset_failed_hosts()  # reset failed hosts\n    filtered_nornir = FFun(self.nr, **filters)  # filter hosts\n\n    # check if no hosts matched\n    if not filtered_nornir.inventory.hosts:\n        msg = (\n            f\"{self.name} - nothing to do, no hosts matched by filters '{filters}'\"\n        )\n        ret.messages.append(msg)\n        log.debug(msg)\n        return ret\n\n    job_data = self.load_job_data(job_data)\n\n    nr = self._add_processors(filtered_nornir, kwargs)  # add processors\n\n    # render config using Jinja2 on a per-host basis\n    for host in nr.inventory.hosts.values():\n        rendered = self.render_jinja2_templates(\n            templates=config,\n            context={\n                \"host\": host,\n                \"norfab\": self.client,\n                \"nornir\": self,\n                \"job_data\": job_data,\n            },\n            filters={\"nb_get_next_ip\": self.nb_get_next_ip},\n        )\n        host.data[\"__task__\"] = {\"config\": rendered}\n\n    # run task\n    log.debug(\n        f\"{self.name} - sending config commands '{config}', kwargs '{kwargs}', is cfg_dry_run - '{cfg_dry_run}'\"\n    )\n    if cfg_dry_run is True:\n        result = nr.run(\n            task=nr_test, use_task_data=\"config\", name=\"cfg_dry_run\", **kwargs\n        )\n    else:\n        with self.connections_lock:\n            result = nr.run(task=task_plugin, **kwargs)\n        ret.changed = True\n\n    ret.result = ResultSerializer(result, to_dict=to_dict, add_details=add_details)\n\n    # remove __task__ data\n    for host_name, host_object in nr.inventory.hosts.items():\n        _ = host_object.data.pop(\"__task__\", None)\n\n    self.watchdog.connections_update(nr, plugin)\n    self.watchdog.connections_clean()\n\n    return ret\n
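A hypothetical sketch, with the same assumptions as for cli above; cfg_dry_run is set so the rendered configuration is returned without being pushed. The template, filter value and job data are illustrative:

```python
# Hypothetical sketch - template, FB value and job_data content are assumptions.
result = worker.cfg(
    config=["ntp server {{ job_data.ntp_server }}"],  # Jinja2-rendered per host
    plugin="netmiko",
    job_data={"ntp_server": "10.0.0.1"},
    FB="ceos*",
    cfg_dry_run=True,  # render and return the commands without pushing them
)
print(result.result)
```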
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.test","title":"test(suite, subset=None, dry_run=False, remove_tasks=True, failed_only=False, return_tests_suite=False, job_data=None, **kwargs)","text":"

Function to test data obtained from devices.

    Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| suite | Union[list, str] | path to the YAML file with tests | required |
| dry_run | bool | if True, return the produced per-host tests suite content only | False |
| subset | str | list or string of comma-separated, case-insensitive glob patterns to filter tests by name; ignored when dry_run is used | None |
| failed_only | bool | if True, return test results for failed tests only | False |
| remove_tasks | bool | if False, results will include other tasks' output | True |
| return_tests_suite | bool | if True, return the rendered per-host tests suite content in addition to the test results, using a dictionary with results and suite keys | False |
| job_data | str | URL to YAML file with data, or dictionary/list of data, to pass on to the Jinja2 rendering context | None |
| kwargs | | any additional arguments to pass on to the Nornir service task | {} |

Source code in norfab\\workers\\nornir_worker.py
    def test(\n    self,\n    suite: Union[list, str],\n    subset: str = None,\n    dry_run: bool = False,\n    remove_tasks: bool = True,\n    failed_only: bool = False,\n    return_tests_suite: bool = False,\n    job_data: str = None,\n    **kwargs,\n) -> dict:\n    \"\"\"\n    Function to tests data obtained from devices.\n\n    :param suite: path to YAML file with tests\n    :param dry_run: if True, returns produced per-host tests suite content only\n    :param subset: list or string with comma separated non case sensitive glob\n        patterns to filter tests' by name, subset argument ignored by dry run\n    :param failed_only: if True returns test results for failed tests only\n    :param remove_tasks: if False results will include other tasks output\n    :param return_tests_suite: if True returns rendered per-host tests suite\n        content in addition to test results using dictionary with ``results``\n        and ``suite`` keys\n    :param job_data: URL to YAML file with data or dictionary/list of data\n        to pass on to Jinja2 rendering context\n    :param kwargs: any additional arguments to pass on to Nornir service task\n    \"\"\"\n    downloaded_suite = None\n    tests = {}  # dictionary to hold per-host test suites\n    add_details = kwargs.get(\"add_details\", False)  # ResultSerializer\n    to_dict = kwargs.get(\"to_dict\", True)  # ResultSerializer\n    filters = {k: kwargs.pop(k) for k in list(kwargs.keys()) if k in FFun_functions}\n    ret = Result(task=f\"{self.name}:test\", result={} if to_dict else [])\n    suites = {}  # dictionary to hold combined test suites\n\n    self.nr.data.reset_failed_hosts()  # reset failed hosts\n    filtered_nornir = FFun(self.nr, **filters)  # filter hosts\n\n    # check if no hosts matched\n    if not filtered_nornir.inventory.hosts:\n        msg = (\n            f\"{self.name} - nothing to do, no hosts matched by filters '{filters}'\"\n        )\n        log.debug(msg)\n        ret.messages.append(msg)\n        if return_tests_suite is True:\n            ret.result = {\"test_results\": [], \"suite\": {}}\n        return ret\n\n    # download tests suite\n    downloaded_suite = self.fetch_jinja2(suite)\n\n    # download job data\n    job_data = self.load_job_data(job_data)\n\n    # generate per-host test suites\n    searchpath, template = os.path.split(downloaded_suite)\n    for host_name, host in filtered_nornir.inventory.hosts.items():\n        context = {\n            \"host\": host,\n            \"norfab\": self.client,\n            \"nornir\": self,\n            \"job_data\": job_data,\n        }\n        # render suite using Jinja2\n        try:\n            j2env = Environment(loader=FileSystemLoader(searchpath))\n            renderer = j2env.get_template(template)\n            rendered_suite = renderer.render(**context)\n        except Exception as e:\n            msg = f\"{self.name} - '{suite}' Jinja2 rendering failed: '{e}'\"\n            raise RuntimeError(msg)\n        # load suit using YAML\n        try:\n            tests[host_name] = yaml.safe_load(rendered_suite)\n        except Exception as e:\n            msg = f\"{self.name} - '{suite}' YAML load failed: '{e}'\"\n            raise RuntimeError(msg)\n\n    # validate tests suite\n    try:\n        _ = modelTestsProcessorSuite(tests=tests)\n    except Exception as e:\n        msg = f\"{self.name} - '{suite}' suite validation failed: '{e}'\"\n        raise RuntimeError(msg)\n\n    # download pattern, schema and custom function files\n    for host_name in 
tests.keys():\n        for index, item in enumerate(tests[host_name]):\n            for k in [\"pattern\", \"schema\", \"function_file\"]:\n                if self.is_url(item.get(k)):\n                    item[k] = self.fetch_file(\n                        item[k], raise_on_fail=True, read=True\n                    )\n                    if k == \"function_file\":\n                        item[\"function_text\"] = item.pop(k)\n            tests[host_name][index] = item\n\n    # save per-host tests suite content before mutating it\n    if return_tests_suite is True:\n        return_suite = copy.deepcopy(tests)\n\n    log.debug(f\"{self.name} - running test '{suite}', is dry run - '{dry_run}'\")\n    # run dry run task\n    if dry_run is True:\n        result = filtered_nornir.run(\n            task=nr_test, name=\"tests_dry_run\", ret_data_per_host=tests\n        )\n        ret.result = ResultSerializer(\n            result, to_dict=to_dict, add_details=add_details\n        )\n    # combine per-host tests in suites based on task task and arguments\n    # Why - to run tests using any nornir service tasks with various arguments\n    else:\n        for host_name, host_tests in tests.items():\n            for test in host_tests:\n                dhash = hashlib.md5()\n                test_args = test.pop(\"norfab\", {})\n                nrtask = test_args.get(\"nrtask\", \"cli\")\n                assert nrtask in [\n                    \"cli\",\n                    \"network\",\n                    \"cfg\",\n                    \"task\",\n                ], f\"{self.name} - unsupported NorFab Nornir Service task '{nrtask}'\"\n                test_json = json.dumps(test_args, sort_keys=True).encode()\n                dhash.update(test_json)\n                test_hash = dhash.hexdigest()\n                suites.setdefault(test_hash, {\"params\": test_args, \"tests\": {}})\n                suites[test_hash][\"tests\"].setdefault(host_name, [])\n                suites[test_hash][\"tests\"][host_name].append(test)\n        log.debug(\n            f\"{self.name} - combined per-host tests suites based on NorFab Nornir Service task and arguments:\\n{suites}\"\n        )\n        # run test suites collecting output from devices\n        for tests_suite in suites.values():\n            nrtask = tests_suite[\"params\"].pop(\"nrtask\", \"cli\")\n            function_kwargs = {\n                **tests_suite[\"params\"],\n                **kwargs,\n                **filters,\n                \"tests\": tests_suite[\"tests\"],\n                \"remove_tasks\": remove_tasks,\n                \"failed_only\": failed_only,\n                \"subset\": subset,\n            }\n            result = getattr(self, nrtask)(\n                **function_kwargs\n            )  # returns Result object\n            # save test results into overall results\n            if to_dict == True:\n                for host_name, host_res in result.result.items():\n                    ret.result.setdefault(host_name, {})\n                    ret.result[host_name].update(host_res)\n            else:\n                ret.result.extend(result.result)\n\n    # check if need to return tests suite content\n    if return_tests_suite is True:\n        ret.result = {\"test_results\": ret.result, \"suite\": return_suite}\n\n    return ret\n
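A hypothetical sketch, assuming worker is an initialized NornirWorker and that the nf:// path points to a Jinja2/YAML tests suite in nornir-salt TestsProcessor format; the path and filter value are made up:

```python
# Hypothetical sketch - the suite path and FB value are assumptions.
results = worker.test(
    suite="nf://tests/ntp_suite.j2",
    FB="ceos*",
    failed_only=True,         # keep only failed test results
    return_tests_suite=True,  # also return the rendered per-host suites
)
print(results.result["test_results"])
print(results.result["suite"])
```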
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.network","title":"network(fun, **kwargs)","text":"

Function to call various network-related utility functions.

    Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| fun | str | utility function name to call | required |
| kwargs | dict | function arguments | {} |

Available utility functions:

- resolve_dns - resolves hosts' hostnames via DNS and returns IP addresses, using the nornir_salt.plugins.tasks.network.resolve_dns Nornir-Salt function
- ping - executes ICMP ping to hosts using the nornir_salt.plugins.tasks.network.ping Nornir-Salt function

Source code in norfab\\workers\\nornir_worker.py
    def network(self, fun, **kwargs) -> dict:\n    \"\"\"\n    Function to call various network related utility functions.\n\n    :param fun: (str) utility function name to call\n    :param kwargs: (dict) function arguments\n\n    Available utility functions.\n\n    **resolve_dns** function\n\n    resolves hosts' hostname DNS returning IP addresses using\n    ``nornir_salt.plugins.tasks.network.resolve_dns`` Nornir-Salt\n    function.\n\n    **ping** function\n\n    Function to execute ICMP ping to host using\n    ``nornir_salt.plugins.tasks.network.ping`` Nornir-Salt\n    function.\n    \"\"\"\n    kwargs[\"call\"] = fun\n    return self.task(\n        plugin=\"nornir_salt.plugins.tasks.network\",\n        **kwargs,\n    )\n
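A hypothetical sketch, assuming worker is an initialized NornirWorker; the FB filter value is illustrative:

```python
# Hypothetical sketch - the FB value is an assumption about inventory host names.
dns = worker.network(fun="resolve_dns", FB="ceos*")  # resolve hosts' hostnames via DNS
icmp = worker.network(fun="ping", FB="ceos*")        # ICMP ping the hosts
```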
    "},{"location":"nornir_worker_api_reference/#norfab.workers.nornir_worker.NornirWorker.parse","title":"parse(plugin='napalm', getters='get_facts', template=None, commands=None, to_dict=True, add_details=False, **kwargs)","text":"

Function to parse network devices' show command output.

    Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| plugin | str | plugin name to use - napalm, textfsm or ttp | 'napalm' |
| getters | str | NAPALM getters to use | 'get_facts' |
| commands | list | commands to send to devices for the TextFSM or TTP template | None |
| template | str | TextFSM or TTP parsing template string or path to file | None |

For the NAPALM plugin, getters can refer to a list of getter names.

Source code in norfab\\workers\\nornir_worker.py
    def parse(\n    self,\n    plugin: str = \"napalm\",\n    getters: str = \"get_facts\",\n    template: str = None,\n    commands: list = None,\n    to_dict: bool = True,\n    add_details: bool = False,\n    **kwargs,\n):\n    \"\"\"\n    Function to parse network devices show commands output\n\n    :param plugin: plugin name to use - ``napalm``, ``textfsm``, ``ttp``\n    :param getters: NAPALM getters to use\n    :param commands: commands to send to devices for TextFSM or TTP template\n    :param template: TextFSM or TTP parsing template string or path to file\n\n    For NAPALM plugin ``method`` can refer to a list of getters names.\n    \"\"\"\n    filters = {k: kwargs.pop(k) for k in list(kwargs.keys()) if k in FFun_functions}\n    ret = Result(task=f\"{self.name}:parse\", result={} if to_dict else [])\n\n    self.nr.data.reset_failed_hosts()  # reset failed hosts\n    filtered_nornir = FFun(self.nr, **filters)  # filter hosts\n\n    # check if no hosts matched\n    if not filtered_nornir.inventory.hosts:\n        msg = (\n            f\"{self.name} - nothing to do, no hosts matched by filters '{filters}'\"\n        )\n        ret.messages.append(msg)\n        log.debug(msg)\n        return ret\n\n    if plugin == \"napalm\":\n        nr = self._add_processors(filtered_nornir, kwargs)  # add processors\n        result = nr.run(task=napalm_get, getters=getters, **kwargs)\n        ret.result = ResultSerializer(\n            result, to_dict=to_dict, add_details=add_details\n        )\n    elif plugin == \"ttp\":\n        result = self.cli(\n            commands=commands or [],\n            run_ttp=template,\n            **filters,\n            **kwargs,\n            to_dict=to_dict,\n            add_details=add_details,\n            plugin=\"netmiko\",\n        )\n        ret.result = result.result\n    elif plugin == \"textfsm\":\n        result = self.cli(\n            commands=commands,\n            **filters,\n            **kwargs,\n            to_dict=to_dict,\n            add_details=add_details,\n            use_textfsm=True,\n            textfsm_template=template,\n            plugin=\"netmiko\",\n        )\n        ret.result = result.result\n    else:\n        raise UnsupportedPluginError(f\"Plugin '{plugin}' not supported\")\n\n    return ret\n
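A hypothetical sketch, assuming worker is an initialized NornirWorker; the NAPALM getter, the command and the FB filter value are illustrative:

```python
# Hypothetical sketch - getter, command and FB values are assumptions.
facts = worker.parse(plugin="napalm", getters="get_facts", FB="ceos*")

# TextFSM parsing via Netmiko; with no explicit template supplied, library-side
# template indexing (e.g. ntc-templates) is assumed to apply
version = worker.parse(plugin="textfsm", commands=["show version"], FB="ceos*")
```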
    "}]} \ No newline at end of file diff --git a/sitemap.xml b/sitemap.xml index 872262f..4d7ff39 100755 --- a/sitemap.xml +++ b/sitemap.xml @@ -32,6 +32,10 @@ https://dmulyalin.github.io/NORFAB/norfab_cli_overview/ 2024-09-28 + + https://dmulyalin.github.io/NORFAB/norfab_client_reference/ + 2024-09-28 + https://dmulyalin.github.io/NORFAB/norfab_exceptions_reference/ 2024-09-28 @@ -68,6 +72,10 @@ https://dmulyalin.github.io/NORFAB/norfab_rest_api_overview/ 2024-09-28 + + https://dmulyalin.github.io/NORFAB/norfab_worker_reference/ + 2024-09-28 + https://dmulyalin.github.io/NORFAB/nornir_service/ 2024-09-28 diff --git a/sitemap.xml.gz b/sitemap.xml.gz index 0567f320dcce873b3b095ca98dc94f48181d0548..b314bfc5fe0e47ba947dd370eab76b255d3edb80 100755 GIT binary patch delta 357 zcmV-r0h<2m0`vlq7=KY~gD?=s-}@=V_ZnCB&<<nho2?7E@7)TqpGtqMc{;uYsjGo%bFM7l zECqx_SPXsvR(zBdLJQ~w5s0T1N>=UxO)=mYYgmKb(;x*`1U@N|8-$-Ddp8NK^ssS5xA DtZu5& delta 347 zcmV-h0i^!)0_p;g7=O)b!!Qhh?|q7pd*iI^(2g`^JM`F%v7L?MD2bV42U$tlx1ZCN zvGW+%2IC)O=|hr*io?0r$pq+>HT(1-%hLpmw6!wrKK=YSuD9u-Toof~2b6eWzqCN)ODT6Gkf08UqKt>oS}fiP%Q5-yZ1EBWL>y^inrN!qaG}nc tYqgExLnCMq4!E%dE-Z+nD7<16hHVmV2>;t8To>PAmfwJhfn$*h001J_t4#m^