diff --git a/.github/workflows/collab.yml b/.github/workflows/collab.yml index e6e3a9a3..7a3284d4 100644 --- a/.github/workflows/collab.yml +++ b/.github/workflows/collab.yml @@ -30,7 +30,7 @@ jobs: - name: Install Build Software shell: bash -l {0} run: | - pip install jupyter-book==0.15.1 docutils==0.17.1 quantecon-book-theme==0.7.2 sphinx-tojupyter==0.3.0 sphinxext-rediraffe==0.2.7 sphinx-exercise==0.4.1 sphinxcontrib-youtube==1.1.0 sphinx-togglebutton==0.3.1 arviz==0.13.0 sphinx_proof==0.2.0 + pip install jupyter-book==0.15.1 docutils==0.17.1 quantecon-book-theme==0.7.2 sphinx-tojupyter==0.3.0 sphinxext-rediraffe==0.2.7 sphinx-exercise==0.4.1 sphinxcontrib-youtube==1.1.0 sphinx-togglebutton==0.3.1 arviz==0.13.0 sphinx_proof==0.2.0 sphinx_reredirects==0.1.3 # Build of HTML (Execution Testing) - name: Build HTML shell: bash -l {0} diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 1c5b4277..a7648a73 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -83,7 +83,7 @@ jobs: NETLIFY_AUTH_TOKEN: ${{ secrets.NETLIFY_AUTH_TOKEN }} NETLIFY_SITE_ID: ${{ secrets.NETLIFY_SITE_ID }} - name: Deploy website to gh-pages - uses: peaceiris/actions-gh-pages@v3 + uses: peaceiris/actions-gh-pages@v4 with: github_token: ${{ secrets.GITHUB_TOKEN }} publish_dir: _build/html/ diff --git a/environment.yml b/environment.yml index 74ae0173..f60099b5 100644 --- a/environment.yml +++ b/environment.yml @@ -4,7 +4,7 @@ channels: - conda-forge dependencies: - python=3.11 - - anaconda=2024.02 + - anaconda=2024.06 - pip - pip: - jupyter-book==0.15.1 @@ -17,6 +17,7 @@ dependencies: - ghp-import==1.1.0 - sphinxcontrib-youtube==1.1.0 - sphinx-togglebutton==0.3.1 + - sphinx_reredirects==0.1.3 # Sandpit Requirements # - PuLP # - cvxpy diff --git a/lectures/_config.yml b/lectures/_config.yml index 0b0bcf3c..2260fca5 100644 --- a/lectures/_config.yml +++ b/lectures/_config.yml @@ -35,7 +35,7 @@ latex: targetname: quantecon-python-intro.tex sphinx: - extra_extensions: [sphinx_multitoc_numbering, sphinxext.rediraffe, sphinx_exercise, sphinx_togglebutton, sphinx.ext.intersphinx, sphinx_proof, sphinx_tojupyter] + extra_extensions: [sphinx_multitoc_numbering, sphinxext.rediraffe, sphinx_exercise, sphinx_togglebutton, sphinx.ext.intersphinx, sphinx_proof, sphinx_tojupyter, sphinx_reredirects] config: bibtex_reference_style: author_year # false-positive links @@ -76,6 +76,9 @@ sphinx: colab_url : https://colab.research.google.com thebe : false # Add a thebe button to pages (requires the repository to run on Binder) intersphinx_mapping: + intermediate: + - https://python.quantecon.org/ + - null pyprog: - https://python-programming.quantecon.org/ - null @@ -108,6 +111,9 @@ sphinx: mathjax_path: https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js rediraffe_redirects: index_toc.md: intro.md + # Remote Redirects + redirects: + ak2: https://python.quantecon.org/ak2.html tojupyter_static_file_path: ["_static"] tojupyter_target_html: true tojupyter_urlpath: "https://intro.quantecon.org/" diff --git a/lectures/_static/lecture_specific/inequality/data.ipynb b/lectures/_static/lecture_specific/inequality/data.ipynb new file mode 100644 index 00000000..97aea652 --- /dev/null +++ b/lectures/_static/lecture_specific/inequality/data.ipynb @@ -0,0 +1,133 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "258b4bc9-2964-470a-8010-05c2162f5e05", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement 
already satisfied: wbgapi in /Users/longye/anaconda3/lib/python3.10/site-packages (1.0.12)\n", + "Requirement already satisfied: plotly in /Users/longye/anaconda3/lib/python3.10/site-packages (5.22.0)\n", + "Requirement already satisfied: requests in /Users/longye/anaconda3/lib/python3.10/site-packages (from wbgapi) (2.31.0)\n", + "Requirement already satisfied: tabulate in /Users/longye/anaconda3/lib/python3.10/site-packages (from wbgapi) (0.9.0)\n", + "Requirement already satisfied: PyYAML in /Users/longye/anaconda3/lib/python3.10/site-packages (from wbgapi) (6.0)\n", + "Requirement already satisfied: tenacity>=6.2.0 in /Users/longye/anaconda3/lib/python3.10/site-packages (from plotly) (8.4.1)\n", + "Requirement already satisfied: packaging in /Users/longye/anaconda3/lib/python3.10/site-packages (from plotly) (23.1)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/longye/anaconda3/lib/python3.10/site-packages (from requests->wbgapi) (1.26.16)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in /Users/longye/anaconda3/lib/python3.10/site-packages (from requests->wbgapi) (2.0.4)\n", + "Requirement already satisfied: idna<4,>=2.5 in /Users/longye/anaconda3/lib/python3.10/site-packages (from requests->wbgapi) (3.4)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /Users/longye/anaconda3/lib/python3.10/site-packages (from requests->wbgapi) (2024.6.2)\n" + ] + } + ], + "source": [ + "!pip install wbgapi plotly\n", + "\n", + "import pandas as pd\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "import random as rd\n", + "import wbgapi as wb\n", + "import plotly.express as px\n", + "\n", + "url = 'https://media.githubusercontent.com/media/QuantEcon/high_dim_data/main/SCF_plus/SCF_plus_mini.csv'\n", + "df = pd.read_csv(url)\n", + "df_income_wealth = df.dropna()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "9630a07a-fce5-474e-92af-104e67e82be5", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: quantecon in /Users/longye/anaconda3/lib/python3.10/site-packages (0.7.1)\n", + "Requirement already satisfied: requests in /Users/longye/anaconda3/lib/python3.10/site-packages (from quantecon) (2.31.0)\n", + "Requirement already satisfied: numpy>=1.17.0 in /Users/longye/anaconda3/lib/python3.10/site-packages (from quantecon) (1.26.3)\n", + "Requirement already satisfied: numba>=0.49.0 in /Users/longye/anaconda3/lib/python3.10/site-packages (from quantecon) (0.59.1)\n", + "Requirement already satisfied: sympy in /Users/longye/anaconda3/lib/python3.10/site-packages (from quantecon) (1.12)\n", + "Requirement already satisfied: scipy>=1.5.0 in /Users/longye/anaconda3/lib/python3.10/site-packages (from quantecon) (1.12.0)\n", + "Requirement already satisfied: llvmlite<0.43,>=0.42.0dev0 in /Users/longye/anaconda3/lib/python3.10/site-packages (from numba>=0.49.0->quantecon) (0.42.0)\n", + "Requirement already satisfied: certifi>=2017.4.17 in /Users/longye/anaconda3/lib/python3.10/site-packages (from requests->quantecon) (2024.6.2)\n", + "Requirement already satisfied: idna<4,>=2.5 in /Users/longye/anaconda3/lib/python3.10/site-packages (from requests->quantecon) (3.4)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in /Users/longye/anaconda3/lib/python3.10/site-packages (from requests->quantecon) (2.0.4)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /Users/longye/anaconda3/lib/python3.10/site-packages (from 
requests->quantecon) (1.26.16)\n", + "Requirement already satisfied: mpmath>=0.19 in /Users/longye/anaconda3/lib/python3.10/site-packages (from sympy->quantecon) (1.3.0)\n" + ] + } + ], + "source": [ + "!pip install quantecon\n", + "import quantecon as qe\n", + "\n", + "varlist = ['n_wealth', # net wealth \n", + " 't_income', # total income\n", + " 'l_income'] # labor income\n", + "\n", + "df = df_income_wealth\n", + "years = df.year.unique()\n", + "\n", + "# create lists to store Gini for each inequality measure\n", + "results = {}\n", + "\n", + "for var in varlist:\n", + " # create lists to store Gini\n", + " gini_yr = []\n", + " for year in years:\n", + " # repeat the observations according to their weights\n", + " counts = list(round(df[df['year'] == year]['weights'] ))\n", + " y = df[df['year'] == year][var].repeat(counts)\n", + " y = np.asarray(y)\n", + " \n", + " rd.shuffle(y) # shuffle the sequence\n", + " \n", + " # calculate and store Gini\n", + " gini = qe.gini_coefficient(y)\n", + " gini_yr.append(gini)\n", + " \n", + " results[var] = gini_yr\n", + "\n", + "# Convert to DataFrame\n", + "results = pd.DataFrame(results, index=years)\n", + "results.to_csv(\"usa-gini-nwealth-tincome-lincome.csv\", index_label='year')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d59e876b-2f77-4fa7-b79a-8e455ad82d43", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/lectures/_static/lecture_specific/inequality/usa-gini-nwealth-tincome-lincome.csv b/lectures/_static/lecture_specific/inequality/usa-gini-nwealth-tincome-lincome.csv index bf820364..3ec95a66 100644 --- a/lectures/_static/lecture_specific/inequality/usa-gini-nwealth-tincome-lincome.csv +++ b/lectures/_static/lecture_specific/inequality/usa-gini-nwealth-tincome-lincome.csv @@ -1,21 +1,21 @@ year,n_wealth,t_income,l_income -1950,0.8257332034366338,0.44248654139458626,0.5342948198773412 -1953,0.8059487586599329,0.4264544060935945,0.5158978980963702 -1956,0.8121790488050616,0.44426942873399283,0.5349293526208142 -1959,0.795206874163792,0.43749348077061573,0.5213985948309416 -1962,0.8086945076579359,0.4435843103853645,0.5345127915054341 -1965,0.7904149225687935,0.43763715466663444,0.7487860020887753 -1968,0.7982885066993497,0.4208620794438902,0.5242396427381545 -1971,0.7911574835420259,0.4233344246090255,0.5576454812313466 -1977,0.7571418922185215,0.46187678800902543,0.5704448110072049 -1983,0.7494335400643013,0.439345618464469,0.5662220844385915 -1989,0.7715705301674302,0.5115249581654197,0.601399568747142 -1992,0.7508126614055308,0.4740650672076798,0.5983592657979563 -1995,0.7569492388110265,0.48965523558400603,0.5969779516716903 -1998,0.7603291991801185,0.49117441585168614,0.5774462841723305 -2001,0.7816118750507056,0.5239092994681135,0.6042739644967272 -2004,0.7700355469522361,0.4884350383903255,0.5981432201792727 -2007,0.7821413776486978,0.5197156312086187,0.626345219575322 -2010,0.8250825295193438,0.5195972120145615,0.6453653328291903 -2013,0.8227698931835303,0.531400174984336,0.6498682917772644 -2016,0.8342975903562234,0.5541400068900825,0.6706846793375284 
+1950,0.8257332034366366,0.44248654139458743,0.534294819877344 +1953,0.805948758659935,0.4264544060935942,0.5158978980963682 +1956,0.8121790488050612,0.44426942873399367,0.5349293526208106 +1959,0.7952068741637912,0.43749348077061534,0.5213985948309414 +1962,0.8086945076579386,0.44358431038536356,0.5345127915054446 +1965,0.7904149225687949,0.4376371546666344,0.7487860020887701 +1968,0.7982885066993503,0.4208620794438885,0.5242396427381534 +1971,0.7911574835420282,0.4233344246090255,0.5576454812313462 +1977,0.7571418922185215,0.46187678800902554,0.57044481100722 +1983,0.749433540064301,0.4393456184644682,0.5662220844385925 +1989,0.7715705301674285,0.5115249581654115,0.6013995687471289 +1992,0.7508126614055305,0.4740650672076754,0.5983592657979544 +1995,0.7569492388110274,0.4896552355840001,0.5969779516717039 +1998,0.7603291991801172,0.49117441585168525,0.5774462841723346 +2001,0.781611875050703,0.523909299468113,0.6042739644967232 +2004,0.7700355469522372,0.48843503839032354,0.5981432201792916 +2007,0.782141377648698,0.5197156312086207,0.6263452195753227 +2010,0.825082529519342,0.5195972120145641,0.6453653328291843 +2013,0.8227698931835299,0.5314001749843426,0.6498682917772886 +2016,0.8342975903562537,0.55414000689009,0.6706846793375292 diff --git a/lectures/_static/lecture_specific/long_run_growth/tooze_ch1_graph.png b/lectures/_static/lecture_specific/long_run_growth/tooze_ch1_graph.png index a3833f10..3ae6891e 100644 Binary files a/lectures/_static/lecture_specific/long_run_growth/tooze_ch1_graph.png and b/lectures/_static/lecture_specific/long_run_growth/tooze_ch1_graph.png differ diff --git a/lectures/_static/quant-econ.bib b/lectures/_static/quant-econ.bib index ce5c6e1c..61f867e2 100644 --- a/lectures/_static/quant-econ.bib +++ b/lectures/_static/quant-econ.bib @@ -2,6 +2,47 @@ QuantEcon Bibliography File used in conjuction with sphinxcontrib-bibtex package Note: Extended Information (like abstracts, doi, url's etc.) 
can be found in quant-econ-extendedinfo.bib file in _static/ ### + + +@book{russell2004history, + title={History of western philosophy}, + author={Russell, Bertrand}, + year={2004}, + publisher={Routledge} +} + +@article{north1989, + title={Constitutions and commitment: the evolution of institutions governing public choice in seventeenth-century England}, + author={North, Douglass C and Weingast, Barry R}, + journal={The journal of economic history}, + volume={49}, + number={4}, + pages={803--832}, + year={1989}, + publisher={Cambridge University Press} +} + +@incollection{keynes1940pay, + title={How to Pay for the War}, + author={Keynes, John Maynard}, + booktitle={Essays in persuasion}, + pages={367--439}, + year={1940}, + publisher={Springer} +} + +@article{bryant1984price, + title={A price discrimination analysis of monetary policy}, + author={Bryant, John and Wallace, Neil}, + journal={The Review of Economic Studies}, + volume={51}, + number={2}, + pages={279--288}, + year={1984}, + publisher={Wiley-Blackwell} +} + + @article{levitt2019did, title={Why did ancient states collapse?: the dysfunctional state}, author={Levitt, Malcolm}, diff --git a/lectures/_toc.yml b/lectures/_toc.yml index 1c33f277..8e4de355 100644 --- a/lectures/_toc.yml +++ b/lectures/_toc.yml @@ -11,6 +11,7 @@ parts: - file: long_run_growth - file: business_cycle - file: inflation_history + - file: french_rev - file: inequality - caption: Foundations numbered: true @@ -55,8 +56,6 @@ parts: - file: unpleasant - file: money_inflation_nonlinear - file: laffer_adaptive - # - file: french_rev - - file: ak2 - caption: Stochastic Dynamics numbered: true chapters: diff --git a/lectures/ak2.md b/lectures/ak2.md deleted file mode 100644 index 67726502..00000000 --- a/lectures/ak2.md +++ /dev/null @@ -1,1280 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.14.1 -kernelspec: - display_name: Python 3 (ipykernel) - language: python - name: python3 ---- - -# Transitions in an Overlapping Generations Model - -In addition to what’s in Anaconda, this lecture will need the following libraries: - -```{code-cell} ipython3 -:tags: [hide-output] -!pip install --upgrade quantecon -``` - -## Introduction - - -This lecture presents a life-cycle model consisting of overlapping generations of two-period lived people proposed by Peter Diamond -{cite}`diamond1965national`. - -We'll present the version that was analyzed in chapter 2 of Auerbach and -Kotlikoff (1987) {cite}`auerbach1987dynamic`. - -Auerbach and Kotlikoff (1987) used their two period model as a warm-up for their analysis of overlapping generation models of long-lived people that is the main topic of their book. 
- -Their model of two-period lived overlapping generations is a useful starting point because - -* it sets forth the structure of interactions between generations of different agents who are alive at a given date -* it activates forces and tradeoffs confronting the government and successive generations of people -* it is good laboratory for studying connections between government tax and subsidy programs and for policies for issuing and servicing government debt -* some interesting experiments involving transitions from one steady state to another can be computed by hand -* it is a good setting for illustrating a **shooting method** for solving a system of non-linear difference equations with initial and terminal condition - - ```{note} -Auerbach and Kotlikoff use computer code to calculate transition paths for their models with long-lived people. -``` - -We take the liberty of extending Auerbach and Kotlikoff's chapter 2 model to study some arrangements for redistributing resources across generations - - * these take the form of a sequence of age-specific lump sum taxes and transfers - -We study how these arrangements affect capital accumulation and government debt - -## Setting - -Time is discrete and is indexed by $t=0, 1, 2, \ldots$. - -The economy lives forever, but the people inside it do not. - -At each time $ t \geq 0$ a representative old person and a representative young person are alive. - -At time $t$ a representative old person coexists with a representative young person who will become an old person at time $t+1$. - -We assume that the population size is constant over time. - -A young person works, saves, and consumes. - -An old person dissaves and consumes, but does not work, - -A government lives forever, i.e., at $t=0, 1, 2, \ldots $. - -Each period $t \geq 0$, the government taxes, spends, transfers, and borrows. - - - - -Initial conditions set outside the model at time $t=0$ are - -* $K_0$ -- initial capital stock brought into time $t=0$ by a representative initial old person -* $D_0$ -- government debt falling due at $t=0$ and owned by a representative old person at time $t=0$ - -$K_0$ and $D_0$ are both measured in units of time $0$ goods. 
- -A government **policy** consists of five sequences $\{G_t, D_t, \tau_t, \delta_{ot}, \delta_{yt}\}_{t=0}^\infty $ whose components are - - * $\tau_t$ -- flat rate tax at time $t$ on wages and earnings from capital and government bonds - * $D_t$ -- one-period government bond principal due at time $t$, per capita - * $G_t$ -- government purchases of goods at time $t$, per capita - * $\delta_{yt}$ -- a lump sum tax on each young person at time $t$ - * $\delta_{ot}$ -- a lump sum tax on each old person at time $t$ - - - -An **allocation** is a collection of sequences $\{C_{yt}, C_{ot}, K_{t+1}, L_t, Y_t, G_t\}_{t=0}^\infty $; constituents of the sequences include - - * $K_t$ -- physical capital per capita - * $L_t$ -- labor per capita - * $Y_t$ -- output per capita - -and also - -* $C_{yt}$ -- consumption of young person at time $t \geq 0$ -* $C_{ot}$ -- consumption of old person at time $t \geq 0$ -* $K_{t+1} - K_t \equiv I_t $ -- investment in physical capital at time $t \geq 0$ -* $G_t$ -- government purchases - -National income and product accounts consist of a sequence of equalities - -* $Y_t = C_{yt} + C_{ot} + (K_{t+1} - K_t) + G_t, \quad t \geq 0$ - -A **price system** is a pair of sequences $\{W_t, r_t\}_{t=0}^\infty$; constituents of a price sequence include rental rates for the factors of production - -* $W_t$ -- rental rate for labor at time $t \geq 0$ -* $r_t$ -- rental rate for capital at time $t \geq 0$ - - -## Production - -There are two factors of production, physical capital $K_t$ and labor $L_t$. - -Capital does not depreciate. - -The initial capital stock $K_0$ is owned by the representative initial old person, who rents it to the firm at time $0$. - -Net investment rate $I_t$ at time $t$ is - -$$ -I_t = K_{t+1} - K_t -$$ - -The capital stock at time $t$ emerges from cumulating past rates of investment: - -$$ -K_t = K_0 + \sum_{s=0}^{t-1} I_s -$$ - -A Cobb-Douglas technology converts physical capital $K_t$ and labor services $L_t$ into -output $Y_t$ - -$$ -Y_t = K_t^\alpha L_t^{1-\alpha}, \quad \alpha \in (0,1) -$$ (eq:prodfn) - - -## Government - -At time $t-1$, the government issues one-period risk-free debt that promises to pay $D_t$ time $t$ goods per capita at time $t$. - -Young people at time $t$ purchase government debt $D_{t+1}$ that matures at time $t+1$. - -Government debt issued at $t$ bears a before-tax net rate of interest rate of $r_{t}$ at time $t+1$. - -The government budget constraint at time $t \geq 0$ is - -$$ -D_{t+1} - D_t = r_t D_t + G_t - T_t -$$ - -or - - - - -$$ -D_{t+1} = (1 + r_t) D_t + G_t - T_t . -$$ (eq:govbudgetsequence) - -Total tax collections net of transfers equal $T_t$ and satisfy - - -$$ -T_t = \tau_t W_t L_t + \tau_t r_t (D_t + K_t) + \delta_{yt} + \delta_{ot} -$$ - - - - -## Activities in Factor Markets - -**Old people:** At each $t \geq 0$, a representative old person - - * brings $K_t$ and $D_t$ into the period, - * rents capital to a representative firm for $r_{t} K_t$, - * pays taxes $\tau_t r_t (K_t+ D_t)$ on its rental and interest earnings, - * pays a lump sum tax $\delta_{ot}$ to the government, - * sells $K_t$ to a young person. 
- - - **Young people:** At each $t \geq 0$, a representative young person - * sells one unit of labor services to a representative firm for $W_t$ in wages, - * pays taxes $\tau_t W_t$ on its labor earnings - * pays a lump sum tax $\delta_{yt}$ to the goverment, - * spends $C_{yt}$ on consumption, - * acquires non-negative assets $A_{t+1}$ consisting of a sum of physical capital $K_{t+1}$ and one-period government bonds $D_{t+1}$ that mature at $t+1$. - -```{note} -If a lump-sum tax is negative, it means that the government pays the person a subsidy. -``` - - -## Representative firm's problem - -The representative firm hires labor services from young people at competitive wage rate $W_t$ and hires capital from old people at competitive rental rate -$r_t$. - -The rental rate on capital $r_t$ equals the interest rate on government one-period bonds. - -Units of the rental rates are: - -* for $W_t$, output at time $t$ per unit of labor at time $t$ -* for $r_t$, output at time $t$ per unit of capital at time $t$ - - -We take output at time $t$ as *numeraire*, so the price of output at time $t$ is one. - -The firm's profits at time $t$ are - -$$ -K_t^\alpha L_t^{1-\alpha} - r_t K_t - W_t L_t . -$$ - -To maximize profits a firm equates marginal products to rental rates: - -$$ -\begin{aligned} -W_t & = (1-\alpha) K_t^\alpha L_t^{-\alpha} \\ -r_t & = \alpha K_t^\alpha L_t^{1-\alpha} -\end{aligned} -$$ (eq:firmfonc) - -Output can be consumed either by old people or young people; or sold to young people who use it to augment the capital stock; or sold to the government for uses that do not generate utility for the people in the model (i.e., ``it is thrown into the ocean''). - - -The firm thus sells output to old people, young people, and the government. - - - - - - - - - -## Individuals' problems - -### Initial old person - -At time $t=0$, a representative initial old person is endowed with $(1 + r_0(1 - \tau_0)) A_0$ in initial assets. - -It must pay a lump sum tax to (if positive) or receive a subsidy from (if negative) -$\delta_{ot}$ the government. - -An old person's budget constraint is - - - -$$ -C_{o0} = (1 + r_0 (1 - \tau_0)) A_0 - \delta_{ot} . -$$ (eq:hbudgetold) - -An initial old person's utility function is $C_{o0}$, so the person's optimal consumption plan -is provided by equation {eq}`eq:hbudgetold`. - -### Young person - -At each $t \geq 0$, a young person inelastically supplies one unit of labor and in return -receives pre-tax labor earnings of $W_t$ units of output. - -A young person's post-tax-and-transfer earnings are $W_t (1 - \tau_t) - \delta_{yt}$. 
- -At each $t \geq 0$, a young person chooses a consumption plan $C_{yt}, C_{ot+1}$ -to maximize the Cobb-Douglas utility function - -$$ -U_t = C_{yt}^\beta C_{o,t+1}^{1-\beta}, \quad \beta \in (0,1) -$$ (eq:utilfn) - -subject to the following budget constraints at times $t$ and $t+1$: - -$$ -\begin{aligned} -C_{yt} + A_{t+1} & = W_t (1 - \tau_t) - \delta_{yt} \\ -C_{ot+1} & = (1+ r_{t+1} (1 - \tau_{t+1}))A_{t+1} - \delta_{ot} -\end{aligned} -$$ (eq:twobudgetc) - - -Solving the second equation of {eq}`eq:twobudgetc` for savings $A_{t+1}$ and substituting it into the first equation implies the present value budget constraint - -$$ -C_{yt} + \frac{C_{ot+1}}{1 + r_{t+1}(1 - \tau_{t+1})} = W_t (1 - \tau_t) - \delta_{yt} - \frac{\delta_{ot}}{1 + r_{t+1}(1 - \tau_{t+1})} -$$ (eq:onebudgetc) - -To solve the young person's choice problem, form a Lagrangian - -$$ -\begin{aligned} -{\mathcal L} & = C_{yt}^\beta C_{o,t+1}^{1-\beta} \\ & + \lambda \Bigl[ C_{yt} + \frac{C_{ot+1}}{1 + r_{t+1}(1 - \tau_{t+1})} - W_t (1 - \tau_t) + \delta_{yt} + \frac{\delta_{ot}}{1 + r_{t+1}(1 - \tau_{t+1})}\Bigr], -\end{aligned} -$$ (eq:lagC) - -where $\lambda$ is a Lagrange multiplier on the intertemporal budget constraint {eq}`eq:onebudgetc`. - - -After several lines of algebra, the intertemporal budget constraint {eq}`eq:onebudgetc` and the first-order conditions for maximizing ${\mathcal L}$ with respect to $C_{yt}, C_{ot+1}$ -imply that an optimal consumption plan satisfies - -$$ -\begin{aligned} -C_{yt} & = \beta \Bigl[ W_t (1 - \tau_t) - \delta_{yt} - \frac{\delta_{ot}}{1 + r_{t+1}(1 - \tau_{t+1})}\Bigr] \\ -\frac{C_{0t+1}}{1 + r_{t+1}(1-\tau_{t+1}) } & = (1-\beta) \Bigl[ W_t (1 - \tau_t) - \delta_{yt} - \frac{\delta_{ot}}{1 + r_{t+1}(1 - \tau_{t+1})}\Bigr] -\end{aligned} -$$ (eq:optconsplan) - -The first-order condition for minimizing Lagrangian {eq}`eq:lagC` with respect to the Lagrange multipler $\lambda$ recovers the budget constraint {eq}`eq:onebudgetc`, -which, using {eq}`eq:optconsplan` gives the optimal savings plan - -$$ -A_{t+1} = (1-\beta) [ (1- \tau_t) W_t - \delta_{yt}] + \beta \frac{\delta_{ot}}{1 + r_{t+1}(1 - \tau_{t+1})} -$$ (eq:optsavingsplan) - - -(sec-equilibrium)= -## Equilbrium - -**Definition:** An equilibrium is an allocation, a government policy, and a price system with the properties that -* given the price system and the government policy, the allocation solves - * representative firms' problems for $t \geq 0$ - * individual persons' problems for $t \geq 0$ -* given the price system and the allocation, the government budget constraint is satisfied for all $t \geq 0$. - - -## Next steps - - -To begin our analysis of equilibrium outcomes, we'll study the special case of the model with which Auerbach and -Kotlikoff (1987) {cite}`auerbach1987dynamic` began their analysis in chapter 2. - -It can be solved by hand. - -We shall do that next. - -After we derive a closed form solution, we'll pretend that we don't know and will compute equilibrium outcome paths. - -We'll do that by first formulating an equilibrium as a fixed point of a mapping from sequences of factor prices and tax rates to sequences of factor prices and tax rates. - -We'll compute an equilibrium by iterating to convergence on that mapping. - - -## Closed form solution - -To get the special chapter 2 case of Auerbach and Kotlikoff (1987) {cite}`auerbach1987dynamic`, we set both $\delta_{ot}$ and $\delta_{yt}$ to zero. 
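Before specializing, a quick numerical sanity check of {eq}`eq:optconsplan` may help. The sketch below is not part of the lecture source: the wage, interest rate, tax rates, lump-sum taxes, and β in it are assumed purely for illustration. It maximizes the Cobb-Douglas objective over $C_{yt}$ subject to the budget constraint and compares the maximizer with the closed-form share rule.

```python
# Illustrative check of {eq}`eq:optconsplan`; every number here is assumed.
from scipy.optimize import minimize_scalar

β = 0.5
W, τ, τ_next = 1.0, 0.15, 0.15        # assumed wage and flat tax rates
r_next = 0.05                          # assumed interest rate at t+1
δy, δo_next = 0.01, 0.01               # assumed lump-sum taxes

R = 1 + r_next * (1 - τ_next)          # after-tax gross return on savings
M = W * (1 - τ) - δy - δo_next / R     # lifetime resources in {eq}`eq:onebudgetc`
Cy_closed = β * M                      # closed-form plan from {eq}`eq:optconsplan`

def neg_U(Cy):
    # C_{o,t+1} implied by the budget constraints {eq}`eq:twobudgetc`
    Co = (W * (1 - τ) - δy - Cy) * R - δo_next
    return -(Cy**β * Co**(1 - β))

res = minimize_scalar(neg_U, bounds=(1e-6, M - 1e-6), method='bounded')
print(Cy_closed, res.x)                # the two values should nearly coincide
```

The lecture performs essentially the same maximization later with `brent_max` from `quantecon`.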
- -As our special case of {eq}`eq:optconsplan`, we compute the following consumption-savings plan for a representative young person: - - -$$ -\begin{aligned} -C_{yt} & = \beta (1 - \tau_t) W_t \\ -A_{t+1} &= (1-\beta) (1- \tau_t) W_t -\end{aligned} -$$ - -Using {eq}`eq:firmfonc` and $A_t = K_t + D_t$, we obtain the following closed form transition law for capital: - -$$ -K_{t+1}=K_{t}^{\alpha}\left(1-\tau_{t}\right)\left(1-\alpha\right)\left(1-\beta\right) - D_{t}\\ -$$ (eq:Klawclosed) - -### Steady states - -From {eq}`eq:Klawclosed` and the government budget constraint {eq}`eq:govbudgetsequence`, we compute **time-invariant** or **steady state values** $\hat K, \hat D, \hat T$: - -$$ -\begin{aligned} -\hat{K} &=\hat{K}\left(1-\hat{\tau}\right)\left(1-\alpha\right)\left(1-\beta\right) - \hat{D} \\ -\hat{D} &= (1 + \hat{r}) \hat{D} + \hat{G} - \hat{T} \\ -\hat{T} &= \hat{\tau} \hat{Y} + \hat{\tau} \hat{r} \hat{D} . -\end{aligned} -$$ (eq:steadystates) - -These imply - -$$ -\begin{aligned} -\hat{K} &= \left[\left(1-\hat{\tau}\right)\left(1-\alpha\right)\left(1-\beta\right)\right]^{\frac{1}{1-\alpha}} \\ -\hat{\tau} &= \frac{\hat{G} + \hat{r} \hat{D}}{\hat{Y} + \hat{r} \hat{D}} -\end{aligned} -$$ - -Let's take an example in which - -1. there is no initial government debt, $D_t=0$, -2. government consumption $G_t$ equals $15\%$ of output $Y_t$ - -Our formulas for steady-state values tell us that - -$$ -\begin{aligned} -\hat{D} &= 0 \\ -\hat{G} &= 0.15 \hat{Y} \\ -\hat{\tau} &= 0.15 \\ -\end{aligned} -$$ - - - -### Implementation - -```{code-cell} ipython3 -import numpy as np -import matplotlib.pyplot as plt -from numba import njit -from quantecon.optimize import brent_max -``` - - -For parameters $\alpha = 0.3$ and $\beta = 0.5$, let's compute $\hat{K}$: - -```{code-cell} ipython3 -# parameters -α = 0.3 -β = 0.5 - -# steady states of τ and D -τ_hat = 0.15 -D_hat = 0. - -# solve for steady state of K -K_hat = ((1 - τ_hat) * (1 - α) * (1 - β)) ** (1 / (1 - α)) -K_hat -``` -Knowing $\hat K$, we can calculate other equilibrium objects. - -Let's first define some Python helper functions. - -```{code-cell} ipython3 -@njit -def K_to_Y(K, α): - - return K ** α - -@njit -def K_to_r(K, α): - - return α * K ** (α - 1) - -@njit -def K_to_W(K, α): - - return (1 - α) * K ** α - -@njit -def K_to_C(K, D, τ, r, α, β): - - # optimal consumption for the old when δ=0 - A = K + D - Co = A * (1 + r * (1 - τ)) - - # optimal consumption for the young when δ=0 - W = K_to_W(K, α) - Cy = β * W * (1 - τ) - - return Cy, Co -``` - -We can use these helper functions to obtain steady state values $\hat{Y}$, $\hat{r}$, and $\hat{W}$ associated with steady state values $\hat{K}$ and $\hat{r}$. 
- -```{code-cell} ipython3 -Y_hat, r_hat, W_hat = K_to_Y(K_hat, α), K_to_r(K_hat, α), K_to_W(K_hat, α) -Y_hat, r_hat, W_hat -``` - -Since steady state government debt $\hat{D}$ is $0$, all taxes are used to pay for government expenditures - -```{code-cell} ipython3 -G_hat = τ_hat * Y_hat -G_hat -``` - -We use the optimal consumption plans to find steady state consumptions for young and old - -```{code-cell} ipython3 -Cy_hat, Co_hat = K_to_C(K_hat, D_hat, τ_hat, r_hat, α, β) -Cy_hat, Co_hat -``` - -Let's store the steady state quantities and prices using an array called `init_ss` - -```{code-cell} ipython3 -init_ss = np.array([K_hat, Y_hat, Cy_hat, Co_hat, # quantities - W_hat, r_hat, # prices - τ_hat, D_hat, G_hat # policies - ]) -``` - - -### Transitions - - - -We have computed a steady state in which the government policy sequences are each constant over time. - - -We'll use this steady state as an initial condition at time $t=0$ for another economy in which government policy sequences are with time-varying sequences. - -To make sense of our calculation, we'll treat $t=0$ as time when a huge unanticipated shock occurs in the form of - - * a time-varying government policy sequences that disrupts an original steady state - * new government policy sequences are eventually time-invariant in the sense that after some date $T >0$, each sequence is constant over time. - * sudden revelation of a new government policy in the form of sequences starting at time $t=0$ - -We assume that everyone, including old people at time $t=0$, knows the new government policy sequence and chooses accordingly. - - - - -As the capital stock and other aggregates adjust to the fiscal policy change over time, the economy will approach a new steady state. - -We can find a transition path from an old steady state to a new steady state by employing a fixed-point algorithm in a space of sequences. - -But in our special case with its closed form solution, we have available a simpler and faster -approach. - -Here we define a Python class `ClosedFormTrans` that computes length $T$ transition path in response to a particular fiscal policy change. - -We choose $T$ large enough so that we have gotten very close to a new steady state after $T$ periods. - -The class takes three keyword arguments, `τ_pol`, `D_pol`, and `G_pol`. - -These are sequences of tax rate, government debt level, and government purchases, respectively. - -In each policy experiment below, we will pass two out of three as inputs required to depict a fiscal policy. - -We'll then compute the single remaining undetermined policy variable from the government budget constraint. - -When we simulate transition paths, it is useful to distinguish **state variables** at time $t$ such as $K_t, Y_t, D_t, W_t, r_t$ from **control variables** that include $C_{yt}, C_{ot}, \tau_{t}, G_t$. - -```{code-cell} ipython3 -class ClosedFormTrans: - """ - This class simulates length T transitional path of a economy - in response to a fiscal policy change given its initial steady - state. The simulation is based on the closed form solution when - the lump sum taxations are absent. 
- - """ - - def __init__(self, α, β): - - self.α, self.β = α, β - - def simulate(self, - T, # length of transitional path to simulate - init_ss, # initial steady state - τ_pol=None, # sequence of tax rates - D_pol=None, # sequence of government debt levels - G_pol=None): # sequence of government purchases - - α, β = self.α, self.β - - # unpack the steady state variables - K_hat, Y_hat, Cy_hat, Co_hat = init_ss[:4] - W_hat, r_hat = init_ss[4:6] - τ_hat, D_hat, G_hat = init_ss[6:9] - - # initialize array containers - # K, Y, Cy, Co - quant_seq = np.empty((T+1, 4)) - - # W, r - price_seq = np.empty((T+1, 2)) - - # τ, D, G - policy_seq = np.empty((T+2, 3)) - - # t=0, starting from steady state - K0, Y0 = K_hat, Y_hat - W0, r0 = W_hat, r_hat - D0 = D_hat - - # fiscal policy - if τ_pol is None: - D1 = D_pol[1] - G0 = G_pol[0] - τ0 = (G0 + (1 + r0) * D0 - D1) / (Y0 + r0 * D0) - elif D_pol is None: - τ0 = τ_pol[0] - G0 = G_pol[0] - D1 = (1 + r0) * D0 + G0 - τ0 * (Y0 + r0 * D0) - elif G_pol is None: - D1 = D_pol[1] - τ0 = τ_pol[0] - G0 = τ0 * (Y0 + r0 * D0) + D1 - (1 + r0) * D0 - - # optimal consumption plans - Cy0, Co0 = K_to_C(K0, D0, τ0, r0, α, β) - - # t=0 economy - quant_seq[0, :] = K0, Y0, Cy0, Co0 - price_seq[0, :] = W0, r0 - policy_seq[0, :] = τ0, D0, G0 - policy_seq[1, 1] = D1 - - # starting from t=1 to T - for t in range(1, T+1): - - # transition of K - K_old, τ_old = quant_seq[t-1, 0], policy_seq[t-1, 0] - D = policy_seq[t, 1] - K = K_old ** α * (1 - τ_old) * (1 - α) * (1 - β) - D - - # output, capital return, wage - Y, r, W = K_to_Y(K, α), K_to_r(K, α), K_to_W(K, α) - - # to satisfy the government budget constraint - if τ_pol is None: - D = D_pol[t] - D_next = D_pol[t+1] - G = G_pol[t] - τ = (G + (1 + r) * D - D_next) / (Y + r * D) - elif D_pol is None: - τ = τ_pol[t] - G = G_pol[t] - D = policy_seq[t, 1] - D_next = (1 + r) * D + G - τ * (Y + r * D) - elif G_pol is None: - D = D_pol[t] - D_next = D_pol[t+1] - τ = τ_pol[t] - G = τ * (Y + r * D) + D_next - (1 + r) * D - - # optimal consumption plans - Cy, Co = K_to_C(K, D, τ, r, α, β) - - # store time t economy aggregates - quant_seq[t, :] = K, Y, Cy, Co - price_seq[t, :] = W, r - policy_seq[t, 0] = τ - policy_seq[t+1, 1] = D_next - policy_seq[t, 2] = G - - self.quant_seq = quant_seq - self.price_seq = price_seq - self.policy_seq = policy_seq - - return quant_seq, price_seq, policy_seq - - def plot(self): - - quant_seq = self.quant_seq - price_seq = self.price_seq - policy_seq = self.policy_seq - - fig, axs = plt.subplots(3, 3, figsize=(14, 10)) - - # quantities - for i, name in enumerate(['K', 'Y', 'Cy', 'Co']): - ax = axs[i//3, i%3] - ax.plot(range(T+1), quant_seq[:T+1, i], label=name) - ax.hlines(init_ss[i], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - - # prices - for i, name in enumerate(['W', 'r']): - ax = axs[(i+4)//3, (i+4)%3] - ax.plot(range(T+1), price_seq[:T+1, i], label=name) - ax.hlines(init_ss[i+4], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - - # policies - for i, name in enumerate(['τ', 'D', 'G']): - ax = axs[(i+6)//3, (i+6)%3] - ax.plot(range(T+1), policy_seq[:T+1, i], label=name) - ax.hlines(init_ss[i+6], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') -``` - -We can create an instance `closed` for model parameters $\{\alpha, \beta\}$ and use it for various fiscal policy experiments. 
- - -```{code-cell} ipython3 -closed = ClosedFormTrans(α, β) -``` - -(exp-tax-cut)= -### Experiment 1: Tax cut - -To illustrate the power of `ClosedFormTrans`, let's first experiment with the following fiscal policy change: - -1. at $t=0$, the government unexpectedly announces a one-period tax cut, $\tau_0 =(1-\frac{1}{3}) \hat{\tau}$, by issuing government debt $\bar{D}$ -2. from $t=1$, the government will keep $D_t=\bar{D}$ and adjust $\tau_{t}$ to collect taxation to pay for the government consumption and interest payments on the debt -3. government consumption $G_t$ will be fixed at $0.15 \hat{Y}$ - -The following equations completely characterize the equilibrium transition path originating from the initial steady state - -$$ -\begin{aligned} -K_{t+1} &= K_{t}^{\alpha}\left(1-\tau_{t}\right)\left(1-\alpha\right)\left(1-\beta\right) - \bar{D} \\ -\tau_{0} &= (1-\frac{1}{3}) \hat{\tau} \\ -\bar{D} &= \hat{G} - \tau_0\hat{Y} \\ -\quad\tau_{t} & =\frac{\hat{G}+r_{t} \bar{D}}{\hat{Y}+r_{t} \bar{D}} -\end{aligned} -$$ - -We can simulate the transition for $20$ periods, after which the economy will be close to a new steady state. - -The first step is to prepare sequences of policy variables that describe fiscal policy. - -We must define sequences of government expenditure $\{G_t\}_{t=0}^{T}$ and debt level $\{D_t\}_{t=0}^{T+1}$ in advance, then pass them to the solver. - -```{code-cell} ipython3 -T = 20 - -# tax cut -τ0 = τ_hat * (1 - 1/3) - -# sequence of government purchase -G_seq = τ_hat * Y_hat * np.ones(T+1) - -# sequence of government debt -D_bar = G_hat - τ0 * Y_hat -D_seq = np.ones(T+2) * D_bar -D_seq[0] = D_hat -``` - -Let's use the `simulate` method of `closed` to compute dynamic transitions. - -Note that we leave `τ_pol` as `None`, since the tax rates need to be determined to satisfy the government budget constraint. - -```{code-cell} ipython3 -quant_seq1, price_seq1, policy_seq1 = closed.simulate(T, init_ss, - D_pol=D_seq, - G_pol=G_seq) -closed.plot() -``` - -We can also experiment with a lower tax cut rate, such as $0.2$. - -```{code-cell} ipython3 -# lower tax cut rate -τ0 = 0.15 * (1 - 0.2) - -# the corresponding debt sequence -D_bar = G_hat - τ0 * Y_hat -D_seq = np.ones(T+2) * D_bar -D_seq[0] = D_hat - -quant_seq2, price_seq2, policy_seq2 = closed.simulate(T, init_ss, - D_pol=D_seq, - G_pol=G_seq) -``` - -```{code-cell} ipython3 -fig, axs = plt.subplots(3, 3, figsize=(14, 10)) - -# quantities -for i, name in enumerate(['K', 'Y', 'Cy', 'Co']): - ax = axs[i//3, i%3] - ax.plot(range(T+1), quant_seq1[:T+1, i], label=name+', 1/3') - ax.plot(range(T+1), quant_seq2[:T+1, i], label=name+', 0.2') - ax.hlines(init_ss[i], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - -# prices -for i, name in enumerate(['W', 'r']): - ax = axs[(i+4)//3, (i+4)%3] - ax.plot(range(T+1), price_seq1[:T+1, i], label=name+', 1/3') - ax.plot(range(T+1), price_seq2[:T+1, i], label=name+', 0.2') - ax.hlines(init_ss[i+4], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - -# policies -for i, name in enumerate(['τ', 'D', 'G']): - ax = axs[(i+6)//3, (i+6)%3] - ax.plot(range(T+1), policy_seq1[:T+1, i], label=name+', 1/3') - ax.plot(range(T+1), policy_seq2[:T+1, i], label=name+', 0.2') - ax.hlines(init_ss[i+6], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') -``` - -The economy with lower tax cut rate at $t=0$ has the same transitional pattern, but is less distorted, and it converges to a new steady state with higher physical capital stock. 
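Before turning to the next experiment, here is a small cross-check of the transition law {eq}`eq:Klawclosed` that drives these paths. It is only a sketch, not part of the lecture source: it holds the tax rate and debt level fixed at the initial steady-state values (rather than letting them vary as in the experiments) and verifies that iterating the law converges to the analytical steady state derived above.

```python
# Iterate {eq}`eq:Klawclosed` under a constant policy (assumed for simplicity)
α, β = 0.3, 0.5              # parameter values used in the lecture
τ_bar, D_bar = 0.15, 0.0     # steady-state tax rate and zero debt

K = 0.05                     # an arbitrary initial capital stock
for _ in range(100):
    K = K**α * (1 - τ_bar) * (1 - α) * (1 - β) - D_bar

# closed-form steady state from the formulas above
K_hat_check = ((1 - τ_bar) * (1 - α) * (1 - β))**(1 / (1 - α))
print(K, K_hat_check)        # the iteration settles on the analytical value
```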
- -(exp-expen-cut)= -### Experiment 2: Government asset accumulation - -Assume that the economy is initially in the same steady state. - -Now the government promises to cut its spending on services and goods by half $\forall t \geq 0$. - -The government targets the same tax rate $\tau_t=\hat{\tau}$ and to accumulate assets $-D_t$ over time. - -To conduct this experiment, we pass `τ_seq` and `G_seq` as inputs and let `D_pol` be determined along the path by satisfying the government budget constraint. - -```{code-cell} ipython3 -# government expenditure cut by a half -G_seq = τ_hat * 0.5 * Y_hat * np.ones(T+1) - -# targeted tax rate -τ_seq = τ_hat * np.ones(T+1) - -closed.simulate(T, init_ss, τ_pol=τ_seq, G_pol=G_seq); -closed.plot() -``` - -As the government accumulates the asset and uses it in production, the rental rate on capital falls and private investment falls. - -As a result, the ratio $-\frac{D_t}{K_t}$ of the government asset to physical capital used in production will increase over time - -```{code-cell} ipython3 -plt.plot(range(T+1), -closed.policy_seq[:-1, 1] / closed.quant_seq[:, 0]) -plt.xlabel('t') -plt.title('-D/K'); -``` - -We want to know how this policy experiment affects individuals. - -In the long run, future cohorts will enjoy higher consumption throughout their lives because they will earn higher labor income when they work. - -However, in the short run, old people suffer because increases in their labor income are not big enough to offset their losses of capital income. - -Such distinct long run and short run effects motivate us to study transition paths. - -```{note} -Although the consumptions in the new steady state are strictly higher, it is at a cost of fewer public services and goods. -``` - - -### Experiment 3: Temporary expenditure cut - -Let's now investigate a scenario in which the government also cuts its spending by half and accumulates the asset. - -But now let the government cut its expenditures only at $t=0$. - -From $t \geq 1$, the government expeditures return to $\hat{G}$ and $\tau_t$ adjusts to maintain the asset level $-D_t = -D_1$. - -```{code-cell} ipython3 -# sequence of government purchase -G_seq = τ_hat * Y_hat * np.ones(T+1) -G_seq[0] = 0 - -# sequence of government debt -D_bar = G_seq[0] - τ_hat * Y_hat -D_seq = D_bar * np.ones(T+2) -D_seq[0] = D_hat - -closed.simulate(T, init_ss, D_pol=D_seq, G_pol=G_seq); -closed.plot() -``` - -The economy quickly converges to a new steady state with higher physical capital stock, lower interest rate, higher wage rate, and higher consumptions for both the young and the old. - -Even though government expenditure $G_t$ returns to its high initial level from $t \geq 1$, the government can balance the budget at a lower tax rate because it gathers additional revenue $-r_t D_t$ from the asset accumulated during the temporary cut in the spendings. - -As in {ref}`exp-expen-cut`, old perople early in the transition periods suffer from this policy shock. - - -## A computational strategy - -With the preceding caluations, we studied dynamic transitions instigated by alternative fiscal policies. - -In all these experiments, we maintained the assumption that lump sum taxes were absent so that $\delta_{yt}=0, \delta_{ot}=0$. - -In this section, we investigate the transition dynamics when the lump sum taxes are present. - -The government will use lump sum taxes and transfers to redistribute resources across successive -generations. 
- -Including lump sum taxes disrupts closed form solution because of how they make optimal consumption and saving plans depend on future prices and tax rates. - -Therefore, we compute equilibrium transitional paths by finding a fixed point of a mapping from sequences to sequences. - - * that fixed point pins down an equilibrium - -To set the stage for the entry of the mapping whose fixed point we seek, we return to concepts introduced in - section {ref}`sec-equilibrium`. - - -**Definition:** Given parameters $\{\alpha$, $\beta\}$, a competitive equilibrium consists of - -* sequences of optimal consumptions $\{C_{yt}, C_{ot}\}$ -* sequences of prices $\{W_t, r_t\}$ -* sequences of capital stock and output $\{K_t, Y_t\}$ -* sequences of tax rates, government assets (debt), government purchases $\{\tau_t, D_t, G_t\, \delta_{yt}, \delta_{ot}\}$ - -with the properties that - -* given the price system and government fiscal policy, consumption plans are optimal -* the government budget constraints are satisfied for all $t$ - -An equilibrium transition path can be computed by "guessing and verifying" some endogenous sequences. - -In our {ref}`exp-tax-cut` example, sequences $\{D_t\}_{t=0}^{T}$ and $\{G_t\}_{t=0}^{T}$ are exogenous. - -In addition, we assume that the lump sum taxes $\{\delta_{yt}, \delta_{ot}\}_{t=0}^{T}$ are given and known to everybody inside the model. - -We can solve for sequences of other equilibrium sequences following the steps below - -1. guess prices $\{W_t, r_t\}_{t=0}^{T}$ and tax rates $\{\tau_t\}_{t=0}^{T}$ -2. solve for optimal consumption and saving plans $\{C_{yt}, C_{ot}\}_{t=0}^{T}$, treating the guesses of future prices and taxes as true -3. solve for transition of the capital stock $\{K_t\}_{t=0}^{T}$ -4. update the guesses for prices and tax rates with the values implied by the equilibrium conditions -5. iterate until convergence - -Let's implement this "guess and verify" approach - -We start by defining the Cobb-Douglas utility function - -```{code-cell} ipython3 -@njit -def U(Cy, Co, β): - - return (Cy ** β) * (Co ** (1-β)) -``` - -We use `Cy_val` to compute the lifetime value of an arbitrary consumption plan, $C_y$, given the intertemporal budget constraint. - -Note that it requires knowing future prices $r_{t+1}$ and tax rate $\tau_{t+1}$. - -```{code-cell} ipython3 -@njit -def Cy_val(Cy, W, r_next, τ, τ_next, δy, δo_next, β): - - # Co given by the budget constraint - Co = (W * (1 - τ) - δy - Cy) * (1 + r_next * (1 - τ_next)) - δo_next - - return U(Cy, Co, β) -``` - -An optimal consumption plan $C_y^*$ can be found by maximizing `Cy_val`. - -Here is an example that computes optimal consumption $C_y^*=\hat{C}_y$ in the steady state with $\delta_{yt}=\delta_{ot}=0,$ like one that we studied earlier - -```{code-cell} ipython3 -W, r_next, τ, τ_next = W_hat, r_hat, τ_hat, τ_hat -δy, δo_next = 0, 0 - -Cy_opt, U_opt, _ = brent_max(Cy_val, # maximand - 1e-6, # lower bound - W*(1-τ)-δy-1e-6, # upper bound - args=(W, r_next, τ, τ_next, δy, δo_next, β)) - -Cy_opt, U_opt -``` - -Let's define a Python class `AK2` that computes the transition paths with the fixed-point algorithm. - -It can handle nonzero lump sum taxes - -```{code-cell} ipython3 -class AK2(): - """ - This class simulates length T transitional path of a economy - in response to a fiscal policy change given its initial steady - state. The transitional path is found by employing a fixed point - algorithm to satisfy the equilibrium conditions. 
- - """ - - def __init__(self, α, β): - - self.α, self.β = α, β - - def simulate(self, - T, # length of transitional path to simulate - init_ss, # initial steady state - δy_seq, # sequence of lump sum tax for the young - δo_seq, # sequence of lump sum tax for the old - τ_pol=None, # sequence of tax rates - D_pol=None, # sequence of government debt levels - G_pol=None, # sequence of government purchases - verbose=False, - max_iter=500, - tol=1e-5): - - α, β = self.α, self.β - - # unpack the steady state variables - K_hat, Y_hat, Cy_hat, Co_hat = init_ss[:4] - W_hat, r_hat = init_ss[4:6] - τ_hat, D_hat, G_hat = init_ss[6:9] - - # K, Y, Cy, Co - quant_seq = np.empty((T+2, 4)) - - # W, r - price_seq = np.empty((T+2, 2)) - - # τ, D, G - policy_seq = np.empty((T+2, 3)) - policy_seq[:, 1] = D_pol - policy_seq[:, 2] = G_pol - - # initial guesses of prices - price_seq[:, 0] = np.ones(T+2) * W_hat - price_seq[:, 1] = np.ones(T+2) * r_hat - - # initial guesses of policies - policy_seq[:, 0] = np.ones(T+2) * τ_hat - - # t=0, starting from steady state - quant_seq[0, :2] = K_hat, Y_hat - - if verbose: - # prepare to plot iterations until convergence - fig, axs = plt.subplots(1, 3, figsize=(14, 4)) - - # containers for checking convergence - price_seq_old = np.empty_like(price_seq) - policy_seq_old = np.empty_like(policy_seq) - - # start iteration - i_iter = 0 - while True: - - if verbose: - # plot current prices at ith iteration - for i, name in enumerate(['W', 'r']): - axs[i].plot(range(T+1), price_seq[:T+1, i]) - axs[i].set_title(name) - axs[i].set_xlabel('t') - axs[2].plot(range(T+1), policy_seq[:T+1, 0], - label=f'{i_iter}th iteration') - axs[2].legend(bbox_to_anchor=(1.05, 1), loc='upper left') - axs[2].set_title('τ') - axs[2].set_xlabel('t') - - # store old prices from last iteration - price_seq_old[:] = price_seq - policy_seq_old[:] = policy_seq - - # start updating quantities and prices - for t in range(T+1): - K, Y = quant_seq[t, :2] - W, r = price_seq[t, :] - r_next = price_seq[t+1, 1] - τ, D, G = policy_seq[t, :] - τ_next, D_next, G_next = policy_seq[t+1, :] - δy, δo = δy_seq[t], δo_seq[t] - δy_next, δo_next = δy_seq[t+1], δo_seq[t+1] - - # consumption for the old - Co = (1 + r * (1 - τ)) * (K + D) - δo - - # optimal consumption for the young - out = brent_max(Cy_val, 1e-6, W*(1-τ)-δy-1e-6, - args=(W, r_next, τ, τ_next, - δy, δo_next, β)) - Cy = out[0] - - quant_seq[t, 2:] = Cy, Co - τ_num = ((1 + r) * D + G - D_next - δy - δo) - τ_denom = (Y + r * D) - policy_seq[t, 0] = τ_num / τ_denom - - # saving of the young - A_next = W * (1 - τ) - δy - Cy - - # transition of K - K_next = A_next - D_next - Y_next = K_to_Y(K_next, α) - W_next, r_next = K_to_W(K_next, α), K_to_r(K_next, α) - - quant_seq[t+1, :2] = K_next, Y_next - price_seq[t+1, :] = W_next, r_next - - i_iter += 1 - - if (np.max(np.abs(price_seq_old - price_seq)) < tol) & \ - (np.max(np.abs(policy_seq_old - policy_seq)) < tol): - if verbose: - print(f"Converge using {i_iter} iterations") - break - - if i_iter > max_iter: - if verbose: - print(f"Fail to converge using {i_iter} iterations") - break - - self.quant_seq = quant_seq - self.price_seq = price_seq - self.policy_seq = policy_seq - - return quant_seq, price_seq, policy_seq - - def plot(self): - - quant_seq = self.quant_seq - price_seq = self.price_seq - policy_seq = self.policy_seq - - fig, axs = plt.subplots(3, 3, figsize=(14, 10)) - - # quantities - for i, name in enumerate(['K', 'Y', 'Cy', 'Co']): - ax = axs[i//3, i%3] - ax.plot(range(T+1), quant_seq[:T+1, i], label=name) - 
ax.hlines(init_ss[i], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - - # prices - for i, name in enumerate(['W', 'r']): - ax = axs[(i+4)//3, (i+4)%3] - ax.plot(range(T+1), price_seq[:T+1, i], label=name) - ax.hlines(init_ss[i+4], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - - # policies - for i, name in enumerate(['τ', 'D', 'G']): - ax = axs[(i+6)//3, (i+6)%3] - ax.plot(range(T+1), policy_seq[:T+1, i], label=name) - ax.hlines(init_ss[i+6], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') -``` - -We can initialize an instance of class `AK2` with model parameters $\{\alpha, \beta\}$ and then use it to conduct fiscal policy experiments. - -```{code-cell} ipython3 -ak2 = AK2(α, β) -``` - -We first examine that the "guess and verify" method leads to the same numerical results as we obtain with the closed form solution when lump sum taxes are muted - -```{code-cell} ipython3 -δy_seq = np.ones(T+2) * 0. -δo_seq = np.ones(T+2) * 0. - -D_pol = np.zeros(T+2) -G_pol = np.ones(T+2) * G_hat - -# tax cut -τ0 = τ_hat * (1 - 1/3) -D1 = D_hat * (1 + r_hat * (1 - τ0)) + G_hat - τ0 * Y_hat - δy_seq[0] - δo_seq[0] -D_pol[0] = D_hat -D_pol[1:] = D1 -``` - -```{code-cell} ipython3 -quant_seq3, price_seq3, policy_seq3 = ak2.simulate(T, init_ss, - δy_seq, δo_seq, - D_pol=D_pol, G_pol=G_pol, - verbose=True) -``` - -```{code-cell} ipython3 -ak2.plot() -``` - -Next, we activate lump sum taxes. - -Let's alter our {ref}`exp-tax-cut` fiscal policy experiment by assuming that the government also increases lump sum taxes for both young and old people $\delta_{yt}=\delta_{ot}=0.005, t\geq0$. - -```{code-cell} ipython3 -δy_seq = np.ones(T+2) * 0.005 -δo_seq = np.ones(T+2) * 0.005 - -D1 = D_hat * (1 + r_hat * (1 - τ0)) + G_hat - τ0 * Y_hat - δy_seq[0] - δo_seq[0] -D_pol[1:] = D1 - -quant_seq4, price_seq4, policy_seq4 = ak2.simulate(T, init_ss, - δy_seq, δo_seq, - D_pol=D_pol, G_pol=G_pol) -``` - -Note how "crowding out" has been mitigated. - -```{code-cell} ipython3 -fig, axs = plt.subplots(3, 3, figsize=(14, 10)) - -# quantities -for i, name in enumerate(['K', 'Y', 'Cy', 'Co']): - ax = axs[i//3, i%3] - ax.plot(range(T+1), quant_seq3[:T+1, i], label=name+', $\delta$s=0') - ax.plot(range(T+1), quant_seq4[:T+1, i], label=name+', $\delta$s=0.005') - ax.hlines(init_ss[i], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - -# prices -for i, name in enumerate(['W', 'r']): - ax = axs[(i+4)//3, (i+4)%3] - ax.plot(range(T+1), price_seq3[:T+1, i], label=name+', $\delta$s=0') - ax.plot(range(T+1), price_seq4[:T+1, i], label=name+', $\delta$s=0.005') - ax.hlines(init_ss[i+4], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - -# policies -for i, name in enumerate(['τ', 'D', 'G']): - ax = axs[(i+6)//3, (i+6)%3] - ax.plot(range(T+1), policy_seq3[:T+1, i], label=name+', $\delta$s=0') - ax.plot(range(T+1), policy_seq4[:T+1, i], label=name+', $\delta$s=0.005') - ax.hlines(init_ss[i+6], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') -``` - -Comparing to {ref}`exp-tax-cut`, the government raises lump-sum taxes to finance the increasing debt interest payment, which is less distortionary comparing to raising the capital income tax rate. - - -### Experiment 4: Unfunded Social Security System - -In this experiment, lump-sum taxes are of equal magnitudes for old and the young, but of opposite signs. - -A negative lump-sum tax is a subsidy. - -Thus, in this experiment we tax the young and subsidize the old. 
- -We start the economy at the same initial steady state that we assumed in several earlier experiments. - -The government sets the lump sum taxes $\delta_{y,t}=-\delta_{o,t}=10\% \hat{C}_{y}$ starting from $t=0$. - -It keeps debt levels and expenditures at their steady state levels $\hat{D}$ and $\hat{G}$. - -In effect, this experiment amounts to launching an unfunded social security system. - -We can use our code to compute the transition ignited by launching this system. - -Let's compare the results to the {ref}`exp-tax-cut`. - -```{code-cell} ipython3 -δy_seq = np.ones(T+2) * Cy_hat * 0.1 -δo_seq = np.ones(T+2) * -Cy_hat * 0.1 - -D_pol[:] = D_hat - -quant_seq5, price_seq5, policy_seq5 = ak2.simulate(T, init_ss, - δy_seq, δo_seq, - D_pol=D_pol, G_pol=G_pol) -``` - -```{code-cell} ipython3 -fig, axs = plt.subplots(3, 3, figsize=(14, 10)) - -# quantities -for i, name in enumerate(['K', 'Y', 'Cy', 'Co']): - ax = axs[i//3, i%3] - ax.plot(range(T+1), quant_seq3[:T+1, i], label=name+', tax cut') - ax.plot(range(T+1), quant_seq5[:T+1, i], label=name+', transfer') - ax.hlines(init_ss[i], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - -# prices -for i, name in enumerate(['W', 'r']): - ax = axs[(i+4)//3, (i+4)%3] - ax.plot(range(T+1), price_seq3[:T+1, i], label=name+', tax cut') - ax.plot(range(T+1), price_seq5[:T+1, i], label=name+', transfer') - ax.hlines(init_ss[i+4], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - -# policies -for i, name in enumerate(['τ', 'D', 'G']): - ax = axs[(i+6)//3, (i+6)%3] - ax.plot(range(T+1), policy_seq3[:T+1, i], label=name+', tax cut') - ax.plot(range(T+1), policy_seq5[:T+1, i], label=name+', transfer') - ax.hlines(init_ss[i+6], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') -``` - -An initial old person benefits especially when the social security system is launched because he receives a transfer but pays nothing for it. - -But in the long run, consumption rates of both young and old people decrease because the the social security system decreases incentives to save. - -That lowers the stock of physical capital and consequently lowers output. - -The government must then raise tax rate in order to pay for its expenditures. - -The higher rate on capital income further distorts incentives to save. diff --git a/lectures/ar1_processes.md b/lectures/ar1_processes.md index c12d5f18..fe54d3d1 100644 --- a/lectures/ar1_processes.md +++ b/lectures/ar1_processes.md @@ -19,7 +19,7 @@ kernelspec: ``` (ar1_processes)= -# AR1 Processes +# AR(1) Processes ```{index} single: Autoregressive processes ``` @@ -35,10 +35,8 @@ These simple models are used again and again in economic research to represent t * dividends * productivity, etc. -AR(1) processes can take negative values but are easily converted into positive processes when necessary by a transformation such as exponentiation. - We are going to study AR(1) processes partly because they are useful and -partly because they help us understand important concepts. +partly because they help us understand important concepts. Let's start with some imports: @@ -48,7 +46,7 @@ import matplotlib.pyplot as plt plt.rcParams["figure.figsize"] = (11, 5) #set default figure size ``` -## The AR(1) Model +## The AR(1) model The **AR(1) model** (autoregressive model of order 1) takes the form @@ -58,26 +56,45 @@ The **AR(1) model** (autoregressive model of order 1) takes the form X_{t+1} = a X_t + b + c W_{t+1} ``` -where $a, b, c$ are scalar-valued parameters. 
+where $a, b, c$ are scalar-valued parameters -This law of motion generates a time series $\{ X_t\}$ as soon as we -specify an initial condition $X_0$. +(Equation {eq}`can_ar1` is sometimes called a **stochastic difference equation**.) + +```{prf:example} +:label: ar1_ex_ar + +For example, $X_t$ might be + +* the log of labor income for a given household, or +* the log of money demand in a given economy. + +In either case, {eq}`can_ar1` shows that the current value evolves as a linear function +of the previous value and an IID shock $W_{t+1}$. + +(We use $t+1$ for the subscript of $W_{t+1}$ because this random variable is not +observed at time $t$.) +``` -This is called the **state process** and the state space is $\mathbb R$. +The specification {eq}`can_ar1` generates a time series $\{ X_t\}$ as soon as we +specify an initial condition $X_0$. To make things even simpler, we will assume that -* the process $\{ W_t \}$ is IID and standard normal, +* the process $\{ W_t \}$ is {ref}`IID ` and standard normal, * the initial condition $X_0$ is drawn from the normal distribution $N(\mu_0, v_0)$ and * the initial condition $X_0$ is independent of $\{ W_t \}$. -### Moving Average Representation + + + +### Moving average representation Iterating backwards from time $t$, we obtain $$ X_t = a X_{t-1} + b + c W_t = a^2 X_{t-2} + a b + a c W_{t-1} + b + c W_t + = a^3 X_{t-3} + a^2 b + a^2 c W_{t-2} + b + c W_t = \cdots $$ @@ -99,7 +116,7 @@ Equation {eq}`ar1_ma` shows that $X_t$ is a well defined random variable, the va Throughout, the symbol $\psi_t$ will be used to refer to the density of this random variable $X_t$. -### Distribution Dynamics +### Distribution dynamics One of the nice things about this model is that it's so easy to trace out the sequence of distributions $\{ \psi_t \}$ corresponding to the time series $\{ X_t\}$. @@ -110,10 +127,9 @@ This is immediate from {eq}`ar1_ma`, since linear combinations of independent normal random variables are normal. Given that $X_t$ is normally distributed, we will know the full distribution -$\psi_t$ if we can pin down its first two moments. +$\psi_t$ if we can pin down its first two [moments](https://en.wikipedia.org/wiki/Moment_(mathematics)). -Let $\mu_t$ and $v_t$ denote the mean and variance -of $X_t$ respectively. +Let $\mu_t$ and $v_t$ denote the mean and variance of $X_t$ respectively. We can pin down these values from {eq}`ar1_ma` or we can use the following recursive expressions: @@ -140,8 +156,7 @@ $$ \psi_t = N(\mu_t, v_t) $$ -The following code uses these facts to track the sequence of marginal -distributions $\{ \psi_t \}$. +The following code uses these facts to track the sequence of marginal distributions $\{ \psi_t \}$. The parameters are @@ -173,14 +188,26 @@ ax.legend(bbox_to_anchor=[1.05,1],loc=2,borderaxespad=1) plt.show() ``` -## Stationarity and Asymptotic Stability -Notice that, in the figure above, the sequence $\{ \psi_t \}$ seems to be converging to a limiting distribution. + +## Stationarity and asymptotic stability + +When we use models to study the real world, it is generally preferable that our +models have clear, sharp predictions. + +For dynamic problems, sharp predictions are related to stability. + +For example, if a dynamic model predicts that inflation always converges to some +kind of steady state, then the model gives a sharp prediction. + +(The prediction might be wrong, but even this is helpful, because we can judge the quality of the model.) 
+ +Notice that, in the figure above, the sequence $\{ \psi_t \}$ seems to be converging to a limiting distribution, suggesting some kind of stability. This is even clearer if we project forward further into the future: ```{code-cell} python3 -def plot_density_seq(ax, mu_0=-3.0, v_0=0.6, sim_length=60): +def plot_density_seq(ax, mu_0=-3.0, v_0=0.6, sim_length=40): mu, v = mu_0, v_0 for t in range(sim_length): mu = a * mu + b @@ -200,7 +227,7 @@ For example, this alternative density sequence also converges to the same limit. ```{code-cell} python3 fig, ax = plt.subplots() -plot_density_seq(ax, mu_0=3.0) +plot_density_seq(ax, mu_0=4.0) plt.show() ``` @@ -235,7 +262,7 @@ We can confirm this is valid for the sequence above using the following code. ```{code-cell} python3 fig, ax = plt.subplots() -plot_density_seq(ax, mu_0=3.0) +plot_density_seq(ax, mu_0=4.0) mu_star = b / (1 - a) std_star = np.sqrt(c**2 / (1 - a**2)) # square root of v_star @@ -248,16 +275,21 @@ plt.show() As claimed, the sequence $\{ \psi_t \}$ converges to $\psi^*$. -### Stationary Distributions +We see that, at least for these parameters, the AR(1) model has strong stability +properties. + + -A stationary distribution is a distribution that is a fixed -point of the update rule for distributions. -In other words, if $\psi_t$ is stationary, then $\psi_{t+j} = -\psi_t$ for all $j$ in $\mathbb N$. +### Stationary distributions -A different way to put this, specialized to the current setting, is as follows: a -density $\psi$ on $\mathbb R$ is **stationary** for the AR(1) process if +Let's try to better understand the limiting distribution $\psi^*$. + +A stationary distribution is a distribution that is a "fixed point" of the update rule for the AR(1) process. + +In other words, if $\psi_t$ is stationary, then $\psi_{t+j} = \psi_t$ for all $j$ in $\mathbb N$. + +A different way to put this, specialized to the current setting, is as follows: a density $\psi$ on $\mathbb R$ is **stationary** for the AR(1) process if $$ X_t \sim \psi @@ -279,8 +311,8 @@ Thus, when $|a| < 1$, the AR(1) model has exactly one stationary density and tha The concept of ergodicity is used in different ways by different authors. -One way to understand it in the present setting is that a version of the Law -of Large Numbers is valid for $\{X_t\}$, even though it is not IID. +One way to understand it in the present setting is that a version of the law +of large numbers is valid for $\{X_t\}$, even though it is not IID. In particular, averages over time series converge to expectations under the stationary distribution. @@ -302,7 +334,10 @@ Notes: * In {eq}`ar1_ergo`, convergence holds with probability one. * The textbook by {cite}`MeynTweedie2009` is a classic reference on ergodicity. -For example, if we consider the identity function $h(x) = x$, we get +```{prf:example} +:label: ar1_ex_id + +If we consider the identity function $h(x) = x$, we get $$ \frac{1}{m} \sum_{t = 1}^m X_t \to @@ -310,11 +345,21 @@ $$ \quad \text{as } m \to \infty $$ -In other words, the time series sample mean converges to the mean of the -stationary distribution. +In other words, the time series sample mean converges to the mean of the stationary distribution. +``` + +Ergodicity is important for a range of reasons. + +For example, {eq}`ar1_ergo` can be used to test theory. + +In this equation, we can use observed data to evaluate the left hand side of {eq}`ar1_ergo`. + +And we can use a theoretical AR(1) model to calculate the right hand side. 
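As a concrete check (a minimal sketch, not part of the original lecture code, reusing the parameters `a`, `b`, `c` and the `numpy` import from above, with an arbitrary sample size), we can simulate a long time series and compare its sample mean with the stationary mean $b/(1-a)$ computed earlier:

```{code-cell} python3
# Left hand side of the ergodicity claim with h(x) = x: a long time-series average.
# Right hand side: the mean of the stationary distribution, b / (1 - a).
m = 100_000        # arbitrary, fairly large sample size
x = 0.0            # the starting point does not affect the long-run average
sample_mean = 0.0
for t in range(m):
    x = a * x + b + c * np.random.randn()   # one step of the AR(1) process
    sample_mean += x / m

print(sample_mean, b / (1 - a))
```

With $|a| < 1$, the two numbers printed above should be close whenever $m$ is large.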
+ +If $\frac{1}{m} \sum_{t = 1}^m X_t$ is not close to $\psi^(x)$, even for many +observations, then our theory seems to be incorrect and we will need to revise +it. -As will become clear over the next few lectures, ergodicity is a very -important concept for statistics and simulation. ## Exercises @@ -339,7 +384,7 @@ M_k = \end{cases} $$ -Here $n!!$ is the double factorial. +Here $n!!$ is the [double factorial](https://en.wikipedia.org/wiki/Double_factorial). According to {eq}`ar1_ergo`, we should have, for any $k \in \mathbb N$, diff --git a/lectures/cagan_adaptive.md b/lectures/cagan_adaptive.md index 14fe60cc..f3d48c34 100644 --- a/lectures/cagan_adaptive.md +++ b/lectures/cagan_adaptive.md @@ -62,7 +62,7 @@ $$ (eq:caganmd_ad) This equation asserts that the demand for real balances -is inversely related to the public's expected rate of inflation. +is inversely related to the public's expected rate of inflation with sensitivity $\alpha$. Equating the logarithm $m_t^d$ of the demand for money to the logarithm $m_t$ of the supply of money in equation {eq}`eq:caganmd_ad` and solving for the logarithm $p_t$ of the price level gives @@ -79,7 +79,7 @@ $$ $$ (eq:eqpipi) We assume that the expected rate of inflation $\pi_t^*$ is governed -by the following adaptive expectations scheme proposed by {cite}`Friedman1956` and {cite}`Cagan`: +by the following adaptive expectations scheme proposed by {cite}`Friedman1956` and {cite}`Cagan`, where $\lambda\in [0,1]$ denotes the weight on expected inflation. $$ \pi_{t+1}^* = \lambda \pi_t^* + (1 -\lambda) \pi_t diff --git a/lectures/cagan_ree.md b/lectures/cagan_ree.md index d694c57d..f0274b56 100644 --- a/lectures/cagan_ree.md +++ b/lectures/cagan_ree.md @@ -18,7 +18,7 @@ kernelspec: We'll use linear algebra first to explain and then do some experiments with a "monetarist theory of price levels". -Economists call it a "monetary" or "monetarist" theory of price levels because effects on price levels occur via a central banks's decisions to print money supply. +Economists call it a "monetary" or "monetarist" theory of price levels because effects on price levels occur via a central bank's decisions to print money supply. * a goverment's fiscal policies determine whether its _expenditures_ exceed its _tax collections_ * if its expenditures exceed its tax collections, the government can instruct the central bank to cover the difference by _printing money_ @@ -27,7 +27,7 @@ Economists call it a "monetary" or "monetarist" theory of price levels because e Such a theory of price levels was described by Thomas Sargent and Neil Wallace in chapter 5 of {cite}`sargent2013rational`, which reprints a 1981 Federal Reserve Bank of Minneapolis article entitled "Unpleasant Monetarist Arithmetic". -Sometimes this theory is also called a "fiscal theory of price levels" to emphasize the importance of fisal deficits in shaping changes in the money supply. +Sometimes this theory is also called a "fiscal theory of price levels" to emphasize the importance of fiscal deficits in shaping changes in the money supply. The theory has been extended, criticized, and applied by John Cochrane {cite}`cochrane2023fiscal`. @@ -41,7 +41,7 @@ persistent inflation. 
The "monetarist" or "fiscal theory of price levels" asserts that -* to _start_ a persistent inflation the government beings persistently to run a money-financed government deficit +* to _start_ a persistent inflation the government begins persistently to run a money-financed government deficit * to _stop_ a persistent inflation the government stops persistently running a money-financed government deficit @@ -94,7 +94,7 @@ m_t^d - p_t = -\alpha \pi_t^* \: , \: \alpha > 0 ; \quad t = 0, 1, \ldots, T . $$ (eq:caganmd) This equation asserts that the demand for real balances -is inversely related to the public's expected rate of inflation. +is inversely related to the public's expected rate of inflation with sensitivity $\alpha$. People somehow acquire **perfect foresight** by their having solved a forecasting problem. @@ -296,7 +296,7 @@ $$ \mu_t = \mu^* , \quad t \geq T_1 $$ -so that, in terms of our notation and formula for $\pi_{T+1}^*$ above, $\tilde \gamma = 1$. +so that, in terms of our notation and formula for $\pi_{T+1}^*$ above, $\gamma^* = 1$. #### Experiment 1: Foreseen sudden stabilization diff --git a/lectures/cobweb.md b/lectures/cobweb.md index 2a1c54ed..31d71812 100644 --- a/lectures/cobweb.md +++ b/lectures/cobweb.md @@ -4,7 +4,7 @@ jupytext: extension: .md format_name: myst format_version: 0.13 - jupytext_version: 1.14.1 + jupytext_version: 1.16.2 kernelspec: display_name: Python 3 (ipykernel) language: python @@ -14,7 +14,6 @@ kernelspec: (cobweb)= # The Cobweb Model - The cobweb model is a model of prices and quantities in a given market, and how they evolve over time. ## Overview @@ -24,7 +23,7 @@ because it shows the fundamental importance of *expectations*. To give some idea of how the model operates, and why expectations matter, imagine the following scenario. -There is a market for soy beans, say, where prices and traded quantities +There is a market for soybeans, say, where prices and traded quantities depend on the choices of buyers and sellers. The buyers are represented by a demand curve --- they buy more at low prices @@ -38,11 +37,11 @@ However, the sellers (who are farmers) need time to grow their crops. Suppose now that the price is currently high. Seeing this high price, and perhaps expecting that the high price will remain -for some time, the farmers plant many fields with soy beans. +for some time, the farmers plant many fields with soybeans. Next period the resulting high supply floods the market, causing the price to drop. -Seeing this low price, the farmers now shift out of soy beans, restricting +Seeing this low price, the farmers now shift out of soybeans, restricting supply and causing the price to climb again. You can imagine how these dynamics could cause cycles in prices and quantities @@ -52,13 +51,10 @@ The cobweb model puts these ideas into equations so we can try to quantify them, and to study conditions under which cycles persist (or disappear). In this lecture, we investigate and simulate the basic model under different -assumptions regarding the way that produces form expectations. +assumptions regarding the way that producers form expectations. Our discussion and simulations draw on [high quality lectures](https://comp-econ.org/CEF_2013/downloads/Complex%20Econ%20Systems%20Lecture%20II.pdf) by [Cars Hommes](https://www.uva.nl/en/profile/h/o/c.h.hommes/c.h.hommes.html). - -+++ - We will use the following imports. 
```{code-cell} ipython3 @@ -70,7 +66,7 @@ import matplotlib.pyplot as plt Early papers on the cobweb cycle include {cite}`cobweb_model` and {cite}`hog_cycle`. -The paper {cite}`hog_cycle` uses the cobweb theorem to explain the prices of hog in the US over 1920--1950 +The paper {cite}`hog_cycle` uses the cobweb theorem to explain the prices of hog in the US over 1920--1950. The next plot replicates part of Figure 2 from that paper, which plots the price of hogs at yearly frequency. @@ -90,13 +86,11 @@ ax.grid() plt.show() ``` - - ## The model -Let's return to our discussion of a hypothetical soy bean market, where price is determined by supply and demand. +Let's return to our discussion of a hypothetical soybean market, where price is determined by supply and demand. -We suppose that demand for soy beans is given by +We suppose that demand for soybeans is given by $$ D(p_t) = a - b p_t @@ -106,15 +100,15 @@ where $a, b$ are nonnegative constants and $p_t$ is the spot (i.e, current marke ($D(p_t)$ is the quantity demanded in some fixed unit, such as thousands of tons.) -Because the crop of soy beans for time $t$ is planted at $t-1$, supply of soy beans at time $t$ depends on *expected* prices at time $t$, which we denote $p^e_{t-1}$. +Because the crop of soybeans for time $t$ is planted at $t-1$, supply of soybeans at time $t$ depends on *expected* prices at time $t$, which we denote $p^e_t$. We suppose that supply is nonlinear in expected prices, and takes the form $$ - S(p^e_{t-1}) = \tanh(\lambda(p^e_{t-1} - c)) + d + S(p^e_t) = \tanh(\lambda(p^e_t - c)) + d $$ -where $\lambda$ is a positive constant and $c, d \geq 0$. +where $\lambda$ is a positive constant, $c, d$ are nonnegative constants and $\tanh$ is a type of [hyperbolic function](https://en.wikipedia.org/wiki/Hyperbolic_functions). Let's make a plot of supply and demand for particular choices of the parameter values. @@ -149,7 +143,7 @@ m = Market() fig, ax = plt.subplots() ax.plot(p_grid, m.demand(p_grid), label="$D$") -ax.plot(p_grid, m.supply(p_grid), label="S") +ax.plot(p_grid, m.supply(p_grid), label="$S$") ax.set_xlabel("price") ax.set_ylabel("quantity") ax.legend() @@ -160,13 +154,13 @@ plt.show() Market equilibrium requires that supply equals demand, or $$ - a - b p_t = S(p^e_{t-1}) + a - b p_t = S(p^e_t) $$ Rewriting in terms of $p_t$ gives $$ - p_t = - \frac{1}{b} [S(p^e_{t-1}) - a] + p_t = - \frac{1}{b} [S(p^e_t) - a] $$ Finally, to complete the model, we need to describe how price expectations are formed. @@ -177,7 +171,7 @@ In particular, we suppose that ```{math} :label: p_et - p^e_{t-1} = f(p_{t-1}, p_{t-2}) + p^e_t = f(p_{t-1}, p_{t-2}) ``` where $f$ is some function. @@ -195,7 +189,6 @@ Combining the last two equations gives the dynamics for prices: The price dynamics depend on the parameter values and also on the function $f$ that determines how producers form expectations. - ## Naive expectations To go further in our analysis we need to specify the function $f$; that is, how expectations are formed. 
@@ -204,7 +197,9 @@ Let's start with naive expectations, which refers to the case where producers ex In other words, -$$ p_{t-1}^e = p_{t-1} $$ +$$ +p_t^e = p_{t-1} +$$ Using {eq}`price_t`, we then have @@ -225,7 +220,6 @@ where $g$ is the function defined by g(p) = - \frac{1}{b} [ S(p) - a] ``` - Here we represent the function $g$ ```{code-cell} ipython3 @@ -239,9 +233,9 @@ def g(model, current_price): return next_price ``` -Let's try to understand how prices will evolve using a 45 degree diagram, which is a tool for studying one-dimensional dynamics. +Let's try to understand how prices will evolve using a 45-degree diagram, which is a tool for studying one-dimensional dynamics. -The function `plot45` defined below helps us draw the 45 degree diagram. +The function `plot45` defined below helps us draw the 45-degree diagram. ```{code-cell} ipython3 :tags: [hide-input] @@ -277,7 +271,7 @@ def plot45(model, pmin, pmax, p0, num_arrows=5): ax.plot(pgrid, g(model, pgrid), 'b-', lw=2, alpha=0.6, label='g') - ax.plot(pgrid, pgrid, lw=1, alpha=0.7, label='45') + ax.plot(pgrid, pgrid, lw=1, alpha=0.7, label='$45\degree$') x = p0 xticks = [pmin] @@ -304,6 +298,8 @@ def plot45(model, pmin, pmax, p0, num_arrows=5): xticks.append(pmax) xtick_labels.append(pmax) + ax.set_ylabel(r'$p_{t+1}$') + ax.set_xlabel(r'$p_t$') ax.set_xticks(xticks) ax.set_yticks(xticks) ax.set_xticklabels(xtick_labels) @@ -316,7 +312,7 @@ def plot45(model, pmin, pmax, p0, num_arrows=5): plt.show() ``` -Now we can set up a market and plot the 45 degree diagram. +Now we can set up a market and plot the 45-degree diagram. ```{code-cell} ipython3 m = Market() @@ -326,7 +322,7 @@ m = Market() plot45(m, 0, 9, 2, num_arrows=3) ``` -The plot shows the function $g$ defined in {eq}`def_g` and the $45$ degree line. +The plot shows the function $g$ defined in {eq}`def_g` and the 45-degree line. Think of $ p_t $ as a value on the horizontal axis. @@ -334,13 +330,13 @@ Since $p_{t+1} = g(p_t)$, we use the graph of $g$ to see $p_{t+1}$ on the vertic Clearly, -- If $ g $ lies above the 45 degree line at $p_t$, then we have $ p_{t+1} > p_t $. -- If $ g $ lies below the 45 degree line at $p_t$, then we have $ p_{t+1} < p_t $. -- If $ g $ hits the 45 degree line at $p_t$, then we have $ p_{t+1} = p_t $, so $ p_t $ is a steady state. +- If $ g $ lies above the 45-degree line at $p_t$, then we have $ p_{t+1} > p_t $. +- If $ g $ lies below the 45-degree line at $p_t$, then we have $ p_{t+1} < p_t $. +- If $ g $ hits the 45-degree line at $p_t$, then we have $ p_{t+1} = p_t $, so $ p_t $ is a steady state. Consider the sequence of prices starting at $p_0$, as shown in the figure. -We find $p_1$ on the vertical axis and then shift it to the horizontal axis using the 45 degree line (where values on the two axes are equal). +We find $p_1$ on the vertical axis and then shift it to the horizontal axis using the 45-degree line (where values on the two axes are equal). Then from $p_1$ we obtain $p_2$ and continue. 
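To complement the 45-degree diagram, here is a minimal sketch (not part of the original lecture code) that iterates the map $p_{t+1} = g(p_t)$ directly, reusing the `Market` instance `m` and the function `g` defined above and starting from the same initial price used in the diagram:

```{code-cell} ipython3
# Iterate p_{t+1} = g(p_t) under naive expectations and plot the price path.
p = 2.0            # same starting price as in the 45-degree diagram above
prices = [p]
for t in range(15):
    p = g(m, p)    # next period's price under naive expectations
    prices.append(p)

fig, ax = plt.subplots()
ax.plot(prices, 'o-', alpha=0.7)
ax.set_xlabel('$t$')
ax.set_ylabel('$p_t$')
plt.show()
```

Depending on the parameters, the resulting path can cycle rather than settle down, which is exactly the kind of behavior the 45-degree diagram is designed to reveal.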
@@ -408,7 +404,7 @@ That is, ```{math} :label: pe_adaptive -p_{t-1}^e = \alpha p_{t-1} + (1-\alpha) p^e_{t-2} +p_t^e = \alpha p_{t-1} + (1-\alpha) p^e_{t-1} \qquad (0 \leq \alpha \leq 1) ``` @@ -416,7 +412,7 @@ Another way to write this is ```{math} :label: pe_adaptive_2 -p_{t-1}^e = p^e_{t-2} + \alpha (p_{t-1} - p_{t-2}^e) +p_t^e = p^e_{t-1} + \alpha (p_{t-1} - p_{t-1}^e) ``` This equation helps to show that expectations shift @@ -427,10 +423,9 @@ This equation helps to show that expectations shift Using {eq}`pe_adaptive`, we obtain the dynamics $$ - p_t = - \frac{1}{b} [ S(\alpha p_{t-1} + (1-\alpha) p^e_{t-2}) - a] + p_t = - \frac{1}{b} [ S(\alpha p_{t-1} + (1-\alpha) p^e_{t-1}) - a] $$ - Let's try to simulate the price and observe the dynamics using different values of $\alpha$. ```{code-cell} ipython3 @@ -464,8 +459,6 @@ def ts_price_plot_adaptive(model, p0, ts_length=10, α=[1.0, 0.9, 0.75]): Let's call the function with prices starting at $p_0 = 5$. -TODO does this fit well in the page, even in the pdf? If not should it be stacked vertically? - ```{code-cell} ipython3 ts_price_plot_adaptive(m, 5, ts_length=30) ``` @@ -477,9 +470,6 @@ expectations, which stabilizes expected prices. This increased stability can be seen in the figures. - -TODO check / fix exercises - ## Exercises ```{exercise-start} @@ -547,7 +537,7 @@ That is, ```{math} :label: pe_blae -p_{t-1}^e = \alpha p_{t-1} + (1-\alpha) p_{t-2} +p_t^e = \alpha p_{t-1} + (1-\alpha) p_{t-2} ``` @@ -605,9 +595,4 @@ ts_plot_price_blae(m, ``` ```{solution-end} -``` - -```{code-cell} ipython3 - -``` - +``` \ No newline at end of file diff --git a/lectures/commod_price.md b/lectures/commod_price.md index be5d5f33..efab8144 100644 --- a/lectures/commod_price.md +++ b/lectures/commod_price.md @@ -32,8 +32,7 @@ We will solve an equation where the price function is the unknown. This is harder than solving an equation for an unknown number, or vector. -The lecture will discuss one way to solve a "functional equation" for an unknown -function +The lecture will discuss one way to solve a [functional equation](https://en.wikipedia.org/wiki/Functional_equation) (an equation where the unknown object is a function). For this lecture we need the `yfinance` library. @@ -70,7 +69,7 @@ s = yf.download('CT=F', '2016-1-1', '2023-4-1')['Adj Close'] fig, ax = plt.subplots() ax.plot(s, marker='o', alpha=0.5, ms=1) -ax.set_ylabel('price', fontsize=12) +ax.set_ylabel('cotton price in USD', fontsize=12) ax.set_xlabel('date', fontsize=12) plt.show() @@ -134,13 +133,12 @@ $p_t$. The harvest of the commodity at time $t$ is $Z_t$. -We assume that the sequence $\{ Z_t \}_{t \geq 1}$ is IID with common -density function $\phi$. +We assume that the sequence $\{ Z_t \}_{t \geq 1}$ is IID with common density function $\phi$, where $\phi$ is nonnegative. Speculators can store the commodity between periods, with $I_t$ units purchased in the current period yielding $\alpha I_t$ units in the next. -Here $\alpha \in (0,1)$ is a depreciation rate for the commodity. +Here the parameter $\alpha \in (0,1)$ is a depreciation rate for the commodity. For simplicity, the risk free interest rate is taken to be zero, so expected profit on purchasing $I_t$ units is @@ -175,6 +173,7 @@ $$ \alpha \mathbb{E}_t \, p_{t+1} - p_t \leq 0 $$ (eq:arbi) +This means that if the expected price is lower than the current price, there is no room for arbitrage. 
Profit maximization gives the additional condition @@ -183,7 +182,7 @@ $$ $$ (eq:pmco) -We also require that the market clears in each period. +We also require that the market clears, with supply equaling demand in each period. We assume that consumers generate demand quantity $D(p)$ corresponding to price $p$. @@ -193,12 +192,12 @@ Let $P := D^{-1}$ be the inverse demand function. Regarding quantities, -* supply is the sum of carryover by speculators and the current harvest +* supply is the sum of carryover by speculators and the current harvest, and * demand is the sum of purchases by consumers and purchases by speculators. Mathematically, -* supply $ = X_t = \alpha I_{t-1} + Z_t$, which takes values in $S := \mathbb R_+$, while +* supply is given by $X_t = \alpha I_{t-1} + Z_t$, which takes values in $S := \mathbb R_+$, while * demand $ = D(p_t) + I_t$ Thus, the market equilibrium condition is @@ -220,6 +219,8 @@ How can we find an equilibrium? Our path of attack will be to seek a system of prices that depend only on the current state. +(Our solution method involves using an [ansatz](https://en.wikipedia.org/wiki/Ansatz), which is an educated guess --- in this case for the price function.) + In other words, we take a function $p$ on $S$ and set $p_t = p(X_t)$ for every $t$. Prices and quantities then follow @@ -235,8 +236,6 @@ conditions above. More precisely, we seek a $p$ such that [](eq:arbi) and [](eq:pmco) hold for the corresponding system [](eq:eosy). -To this end, suppose that there exists a function $p^*$ on $S$ -satisfying $$ p^*(x) = \max @@ -285,7 +284,7 @@ But then $D(p^*(X_t)) = X_t$ and $I_t = I(X_t) = 0$. As a consequence, both [](eq:arbi) and [](eq:pmco) hold. -We have found an equilibrium. +We have found an equilibrium, which verifies the ansatz. ### Computing the equilibrium @@ -347,7 +346,7 @@ The code below implements this iterative process, starting from $p_0 = P$. The distribution $\phi$ is set to a shifted Beta distribution (although many other choices are possible). -The integral in [](eq:dopf3) is computed via Monte Carlo. +The integral in [](eq:dopf3) is computed via {ref}`Monte Carlo `. ```{code-cell} ipython3 @@ -395,7 +394,8 @@ while error > tol: ax.plot(grid, price, 'k-', alpha=0.5, lw=2, label=r'$p^*$') ax.legend() -ax.set_xlabel('$x$', fontsize=12) +ax.set_xlabel('$x$') +ax.set_ylabel("prices") plt.show() ``` diff --git a/lectures/complex_and_trig.md b/lectures/complex_and_trig.md index 8fe53202..7f40497c 100644 --- a/lectures/complex_and_trig.md +++ b/lectures/complex_and_trig.md @@ -103,12 +103,16 @@ from sympy import (Symbol, symbols, Eq, nsolve, sqrt, cos, sin, simplify, ### An Example +```{prf:example} +:label: ct_ex_com + Consider the complex number $z = 1 + \sqrt{3} i$. For $z = 1 + \sqrt{3} i$, $x = 1$, $y = \sqrt{3}$. It follows that $r = 2$ and $\theta = \tan^{-1}(\sqrt{3}) = \frac{\pi}{3} = 60^o$. +``` Let's use Python to plot the trigonometric form of the complex number $z = 1 + \sqrt{3} i$. diff --git a/lectures/cons_smooth.md b/lectures/cons_smooth.md index b2bace94..2719baa8 100644 --- a/lectures/cons_smooth.md +++ b/lectures/cons_smooth.md @@ -21,7 +21,7 @@ In this lecture, we'll study a famous model of the "consumption function" that M In this lecture, we'll study what is often called the "consumption-smoothing model" using matrix multiplication and matrix inversion, the same tools that we used in this QuantEcon lecture {doc}`present values `. 
-Formulas presented in {doc}`present value formulas` are at the core of the consumption smoothing model because we shall use them to define a consumer's "human wealth". +Formulas presented in {doc}`present value formulas` are at the core of the consumption-smoothing model because we shall use them to define a consumer's "human wealth". The key idea that inspired Milton Friedman was that a person's non-financial income, i.e., his or her wages from working, could be viewed as a dividend stream from that person's ''human capital'' @@ -39,7 +39,7 @@ It will take a while for a "present value" or asset price explicilty to appear i ## Analysis -As usual, we'll start with by importing some Python modules. +As usual, we'll start by importing some Python modules. ```{code-cell} ipython3 import numpy as np @@ -128,7 +128,7 @@ Indeed, we shall see that when $\beta R = 1$ (a condition assumed by Milton Frie By **smoother** we mean as close as possible to being constant over time. -The preference for smooth consumption paths that is built into the model gives it the name "consumption smoothing model". +The preference for smooth consumption paths that is built into the model gives it the name "consumption-smoothing model". Let's dive in and do some calculations that will help us understand how the model works. @@ -176,7 +176,7 @@ $$ \sum_{t=0}^T R^{-t} c_t = a_0 + h_0. $$ (eq:budget_intertemp) -Equation {eq}`eq:budget_intertemp` says that the present value of the consumption stream equals the sum of finanical and non-financial (or human) wealth. +Equation {eq}`eq:budget_intertemp` says that the present value of the consumption stream equals the sum of financial and non-financial (or human) wealth. Robert Hall {cite}`Hall1978` showed that when $\beta R = 1$, a condition Milton Friedman had also assumed, it is "optimal" for a consumer to smooth consumption by setting @@ -196,7 +196,7 @@ $$ (eq:conssmoothing) Equation {eq}`eq:conssmoothing` is the consumption-smoothing model in a nutshell. -## Mechanics of Consumption smoothing model +## Mechanics of consumption-smoothing model As promised, we'll provide step-by-step instructions on how to use linear algebra, readily implemented in Python, to compute all objects in play in the consumption-smoothing model. @@ -338,14 +338,14 @@ print('Welfare:', welfare(cs_model, c_seq)) ### Experiments -In this section we decribe how a consumption sequence would optimally respond to different sequences sequences of non-financial income. +In this section we describe how a consumption sequence would optimally respond to different sequences sequences of non-financial income. -First we create a function `plot_cs` that generate graphs for different instances of the consumption smoothing model `cs_model`. +First we create a function `plot_cs` that generates graphs for different instances of the consumption-smoothing model `cs_model`. This will help us avoid rewriting code to plot outcomes for different non-financial income sequences. ```{code-cell} ipython3 -def plot_cs(model, # consumption smoothing model +def plot_cs(model, # consumption-smoothing model a0, # initial financial wealth y_seq # non-financial income process ): @@ -368,7 +368,7 @@ def plot_cs(model, # consumption smoothing model plt.show() ``` -In the experiments below, please study how consumption and financial asset sequences vary accross different sequences for non-financial income. 
+In the experiments below, please study how consumption and financial asset sequences vary across different sequences for non-financial income. #### Experiment 1: one-time gain/loss @@ -602,7 +602,7 @@ First, we define the welfare with respect to $\xi_1$ and $\phi$ def welfare_rel(ξ1, ϕ): """ Compute welfare of variation sequence - for given ϕ, ξ1 with a consumption smoothing model + for given ϕ, ξ1 with a consumption-smoothing model """ cvar_seq = compute_variation(cs_model, ξ1=ξ1, @@ -661,13 +661,13 @@ QuantEcon lecture {doc}`geometric series `. In particular, it **lowers** the government expenditure multiplier relative to one implied by the original Keynesian consumption function presented in {doc}`geometric series `. -Friedman's work opened the door to an enlighening literature on the aggregate consumption function and associated government expenditure multipliers that +Friedman's work opened the door to an enlightening literature on the aggregate consumption function and associated government expenditure multipliers that remains active today. ## Appendix: solving difference equations with linear algebra -In the preceding sections we have used linear algebra to solve a consumption smoothing model. +In the preceding sections we have used linear algebra to solve a consumption-smoothing model. The same tools from linear algebra -- matrix multiplication and matrix inversion -- can be used to study many other dynamic models. @@ -749,7 +749,7 @@ is the inverse of $A$ and check that $A A^{-1} = I$ ``` -### Second order difference equation +### Second-order difference equation A second-order linear difference equation for $\{y_t\}_{t=0}^T$ is @@ -783,6 +783,6 @@ Multiplying both sides by inverse of the matrix on the left again provides the ```{exercise} :label: consmooth_ex2 -As an exercise, we ask you to represent and solve a **third order linear difference equation**. +As an exercise, we ask you to represent and solve a **third-order linear difference equation**. How many initial conditions must you specify? ``` diff --git a/lectures/eigen_I.md b/lectures/eigen_I.md index 46dc221f..948b2f05 100644 --- a/lectures/eigen_I.md +++ b/lectures/eigen_I.md @@ -88,7 +88,8 @@ itself. This means $A$ is an $n \times n$ matrix that maps (or "transforms") a vector $x$ in $\mathbb{R}^n$ to a new vector $y=Ax$ also in $\mathbb{R}^n$. -Here's one example: +```{prf:example} +:label: eigen1_ex_sq $$ \begin{bmatrix} @@ -116,6 +117,7 @@ $$ transforms the vector $x = \begin{bmatrix} 1 \\ 3 \end{bmatrix}$ to the vector $y = \begin{bmatrix} 5 \\ 2 \end{bmatrix}$. +``` Let's visualize this using Python: diff --git a/lectures/eigen_II.md b/lectures/eigen_II.md index 52fd505e..5ccd9ebb 100644 --- a/lectures/eigen_II.md +++ b/lectures/eigen_II.md @@ -26,7 +26,7 @@ In addition to what's in Anaconda, this lecture will need the following librarie In this lecture we will begin with the foundational concepts in spectral theory. -Then we will explore the Perron-Frobenius Theorem and connect it to applications in Markov chains and networks. +Then we will explore the Perron-Frobenius theorem and connect it to applications in Markov chains and networks. We will use the following imports: @@ -64,6 +64,9 @@ An $n \times n$ nonnegative matrix $A$ is called irreducible if $A + A^2 + A^3 + In other words, for each $i,j$ with $1 \leq i, j \leq n$, there exists a $k \geq 0$ such that $a^{k}_{ij} > 0$. 
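A quick way to check irreducibility numerically is to sum the first $n$ matrix powers and test whether the sum is everywhere positive. The helper below is an illustrative sketch (not part of the original lecture; the name `is_irreducible` is ours) that assumes only the `numpy` import used throughout this lecture; applied to the example matrices that follow, it agrees with the conclusions stated there.

```{code-cell} ipython3
def is_irreducible(A):
    """
    Rough numerical check: sum the powers A, A^2, ..., A^n and test whether
    the sum is everywhere positive.  Powers up to n suffice, since any path
    between two nodes of an n-node graph can be taken to have length at most n.
    """
    n = A.shape[0]
    S = np.zeros_like(A, dtype=float)
    P = np.identity(n)
    for _ in range(n):
        P = P @ A          # P runs through A, A^2, ..., A^n
        S += P
    return bool((S > 0).all())
```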
+```{prf:example} +:label: eigen2_ex_irr + Here are some examples to illustrate this further: $$ @@ -94,6 +97,7 @@ $$ $C$ is not irreducible since $C^k = C$ for all $k \geq 0$ and thus $c^{k}_{12},c^{k}_{21} = 0$ for all $k \geq 0$. +``` ### Left eigenvectors @@ -159,7 +163,7 @@ This is a more common expression and where the name left eigenvectors originates For a square nonnegative matrix $A$, the behavior of $A^k$ as $k \to \infty$ is controlled by the eigenvalue with the largest absolute value, often called the **dominant eigenvalue**. -For any such matrix $A$, the Perron-Frobenius Theorem characterizes certain +For any such matrix $A$, the Perron-Frobenius theorem characterizes certain properties of the dominant eigenvalue and its corresponding eigenvector. ```{prf:Theorem} Perron-Frobenius Theorem @@ -188,7 +192,7 @@ Let's build our intuition for the theorem using a simple example we have seen [b Now let's consider examples for each case. -#### Example: Irreducible matrix +#### Example: irreducible matrix Consider the following irreducible matrix $A$: @@ -204,7 +208,7 @@ We can compute the dominant eigenvalue and the corresponding eigenvector eig(A) ``` -Now we can see the claims of the Perron-Frobenius Theorem holds for the irreducible matrix $A$: +Now we can see the claims of the Perron-Frobenius theorem holds for the irreducible matrix $A$: 1. The dominant eigenvalue is real-valued and non-negative. 2. All other eigenvalues have absolute values less than or equal to the dominant eigenvalue. @@ -223,6 +227,9 @@ Let $A$ be a square nonnegative matrix and let $A^k$ be the $k^{th}$ power of $A A matrix is called **primitive** if there exists a $k \in \mathbb{N}$ such that $A^k$ is everywhere positive. +```{prf:example} +:label: eigen2_ex_prim + Recall the examples given in irreducible matrices: $$ @@ -244,10 +251,11 @@ B^2 = \begin{bmatrix} 1 & 0 \\ $$ $B$ is irreducible but not primitive since there are always zeros in either principal diagonal or secondary diagonal. +``` We can see that if a matrix is primitive, then it implies the matrix is irreducible but not vice versa. -Now let's step back to the primitive matrices part of the Perron-Frobenius Theorem +Now let's step back to the primitive matrices part of the Perron-Frobenius theorem ```{prf:Theorem} Continous of Perron-Frobenius Theorem :label: con-perron-frobenius @@ -259,7 +267,7 @@ If $A$ is primitive then, $ r(A)^{-m} A^m$ converges to $v w^{\top}$ when $m \rightarrow \infty$. The matrix $v w^{\top}$ is called the **Perron projection** of $A$. ``` -#### Example 1: Primitive matrix +#### Example 1: primitive matrix Consider the following primitive matrix $B$: @@ -277,7 +285,7 @@ We compute the dominant eigenvalue and the corresponding eigenvector eig(B) ``` -Now let's give some examples to see if the claims of the Perron-Frobenius Theorem hold for the primitive matrix $B$: +Now let's give some examples to see if the claims of the Perron-Frobenius theorem hold for the primitive matrix $B$: 1. The dominant eigenvalue is real-valued and non-negative. 2. All other eigenvalues have absolute values strictly less than the dominant eigenvalue. @@ -373,18 +381,18 @@ check_convergence(B) The result shows that the matrix is not primitive as it is not everywhere positive. -These examples show how the Perron-Frobenius Theorem relates to the eigenvalues and eigenvectors of positive matrices and the convergence of the power of matrices. 
+These examples show how the Perron-Frobenius theorem relates to the eigenvalues and eigenvectors of positive matrices and the convergence of the power of matrices. In fact we have already seen the theorem in action before in {ref}`the Markov chain lecture `. (spec_markov)= -#### Example 2: Connection to Markov chains +#### Example 2: connection to Markov chains We are now prepared to bridge the languages spoken in the two lectures. A primitive matrix is both irreducible and aperiodic. -So Perron-Frobenius Theorem explains why both {ref}`Imam and Temple matrix ` and [Hamilton matrix](https://en.wikipedia.org/wiki/Hamiltonian_matrix) converge to a stationary distribution, which is the Perron projection of the two matrices +So Perron-Frobenius theorem explains why both {ref}`Imam and Temple matrix ` and [Hamilton matrix](https://en.wikipedia.org/wiki/Hamiltonian_matrix) converge to a stationary distribution, which is the Perron projection of the two matrices ```{code-cell} ipython3 P = np.array([[0.68, 0.12, 0.20], @@ -449,7 +457,7 @@ As we have seen, the largest eigenvalue for a primitive stochastic matrix is one This can be proven using [Gershgorin Circle Theorem](https://en.wikipedia.org/wiki/Gershgorin_circle_theorem), but it is out of the scope of this lecture. -So by the statement (6) of Perron-Frobenius Theorem, $\lambda_i<1$ for all $i -# Inflation During French Revolution - - -## Overview - -This lecture describes some monetary and fiscal features of the French Revolution -described by {cite}`sargent_velde1995`. - -In order to finance public expenditures and service debts issued by earlier French governments, -successive French governments performed several policy experiments. - -Authors of these experiments were guided by their having decided to put in place monetary-fiscal policies recommended by particular theories. - -As a consequence, data on money growth and inflation from the period 1789 to 1787 at least temorarily illustrated outcomes predicted by these arrangements: - -* some *unpleasant monetarist arithmetic* like that described in this quanteon lecture XXX -that governed French government debt dynamics in the decades preceding 1789 - -* a *real bills* theory of the effects of government open market operations in which the government *backs* its issues of paper money with valuable real property or financial assets - -* a classical ``gold or silver'' standard - -* a classical inflation-tax theory of inflation in which Philip Cagan's demand for money studied -in this lecture is a key component - -* a *legal restrictions* or *financial repression* theory of the demand for real balances - -We use matplotlib to replicate several of the graphs that they used to present salient patterns. 
- - - -## Data Sources - -This notebook uses data from three spreadsheets: - - * datasets/fig_3.ods - * datasets/dette.xlsx - * datasets/assignat.xlsx - -```{code-cell} ipython3 -import numpy as np -import pandas as pd -import matplotlib.pyplot as plt -plt.rcParams.update({'font.size': 12}) -``` - - -## Figure 1 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Ratio of debt service to taxes, Britain and France" - name: fig1 ---- - -# Read the data from the Excel file -data1 = pd.read_excel('datasets/dette.xlsx', sheet_name='Debt', usecols='R:S', skiprows=5, nrows=99, header=None) -data1a = pd.read_excel('datasets/dette.xlsx', sheet_name='Debt', usecols='P', skiprows=89, nrows=15, header=None) - -# Plot the data -plt.figure() -plt.plot(range(1690, 1789), 100 * data1.iloc[:, 1], linewidth=0.8) - -date = np.arange(1690, 1789) -index = (date < 1774) & (data1.iloc[:, 0] > 0) -plt.plot(date[index], 100 * data1[index].iloc[:, 0], '*:', color='r', linewidth=0.8) - -# Plot the additional data -plt.plot(range(1774, 1789), 100 * data1a, '*:', color='orange') - -# Note about the data -# The French data before 1720 don't match up with the published version -# Set the plot properties -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_facecolor('white') -plt.gca().set_xlim([1688, 1788]) -plt.ylabel('% of Taxes') - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig1.pdf', dpi=600) -#plt.savefig('frfinfig1.jpg', dpi=600) -``` - - - {numref}`fig1` plots ratios of debt service to total taxes collected for Great Britain and France. - The figure shows - - * ratios of debt service to taxes rise for both countries at the beginning of the century and at the end of the century - * ratios that are similar for both countries in most years - - - - - -## Figure 2 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Government Expenditures and Tax Revenues in Britain" - name: fig2 ---- - -# Read the data from Excel file -data2 = pd.read_excel('datasets/dette.xlsx', sheet_name='Militspe', usecols='M:X', skiprows=7, nrows=102, header=None) - -# Plot the data -plt.figure() -plt.plot(range(1689, 1791), data2.iloc[:, 5], linewidth=0.8) -plt.plot(range(1689, 1791), data2.iloc[:, 11], linewidth=0.8, color='red') -plt.plot(range(1689, 1791), data2.iloc[:, 9], linewidth=0.8, color='orange') -plt.plot(range(1689, 1791), data2.iloc[:, 8], 'o-', markerfacecolor='none', linewidth=0.8, color='purple') - -# Customize the plot -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().tick_params(labelsize=12) -plt.xlim([1689, 1790]) -plt.ylabel('millions of pounds', fontsize=12) - -# Add text annotations -plt.text(1765, 1.5, 'civil', fontsize=10) -plt.text(1760, 4.2, 'civil plus debt service', fontsize=10) -plt.text(1708, 15.5, 'total govt spending', fontsize=10) -plt.text(1759, 7.3, 'revenues', fontsize=10) - - -plt.tight_layout() -plt.show() - -# Save the figure as a PDF -#plt.savefig('frfinfig2.pdf', dpi=600) -``` - - - -{numref}`fig2` plots total taxes, total government expenditures, and the composition of government expenditures in Great Britain during much of the 18th century. 
- -## Figure 3 - - - - -```{code-cell} ipython3 -# Read the data from the Excel file -data1 = pd.read_excel('datasets/fig_3.xlsx', sheet_name='Sheet1', usecols='C:F', skiprows=5, nrows=30, header=None) - -data1.replace(0, np.nan, inplace=True) -``` - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Government Spending and Tax Revenues in France" - name: fr_fig3 ---- -# Plot the data -plt.figure() - -plt.plot(range(1759, 1789, 1), data1.iloc[:, 0], '-x', linewidth=0.8) -plt.plot(range(1759, 1789, 1), data1.iloc[:, 1], '--*', linewidth=0.8) -plt.plot(range(1759, 1789, 1), data1.iloc[:, 2], '-o', linewidth=0.8, markerfacecolor='none') -plt.plot(range(1759, 1789, 1), data1.iloc[:, 3], '-*', linewidth=0.8) - -plt.text(1775, 610, 'total spending', fontsize=10) -plt.text(1773, 325, 'military', fontsize=10) -plt.text(1773, 220, 'civil plus debt service', fontsize=10) -plt.text(1773, 80, 'debt service', fontsize=10) -plt.text(1785, 500, 'revenues', fontsize=10) - - - -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.ylim([0, 700]) -plt.ylabel('millions of livres') - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig3.jpg', dpi=600) -``` - - -TO TEACH TOM: By staring at {numref}`fr_fig3` carefully - -{numref}`fr_fig3` plots total taxes, total government expenditures, and the composition of government expenditures in France during much of the 18th century. - -```{code-cell} ipython3 - ---- -mystnb: - figure: - caption: "Government Spending and Tax Revenues in France" - name: fr_fig3b ---- -# Plot the data -plt.figure() - -plt.plot(np.arange(1759, 1789, 1)[~np.isnan(data1.iloc[:, 0])], data1.iloc[:, 0][~np.isnan(data1.iloc[:, 0])], '-x', linewidth=0.8) -plt.plot(np.arange(1759, 1789, 1)[~np.isnan(data1.iloc[:, 1])], data1.iloc[:, 1][~np.isnan(data1.iloc[:, 1])], '--*', linewidth=0.8) -plt.plot(np.arange(1759, 1789, 1)[~np.isnan(data1.iloc[:, 2])], data1.iloc[:, 2][~np.isnan(data1.iloc[:, 2])], '-o', linewidth=0.8, markerfacecolor='none') -plt.plot(np.arange(1759, 1789, 1)[~np.isnan(data1.iloc[:, 3])], data1.iloc[:, 3][~np.isnan(data1.iloc[:, 3])], '-*', linewidth=0.8) - -plt.text(1775, 610, 'total spending', fontsize=10) -plt.text(1773, 325, 'military', fontsize=10) -plt.text(1773, 220, 'civil plus debt service', fontsize=10) -plt.text(1773, 80, 'debt service', fontsize=10) -plt.text(1785, 500, 'revenues', fontsize=10) - - -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.ylim([0, 700]) -plt.ylabel('millions of livres') - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig3_ignore_nan.jpg', dpi=600) -``` - -{numref}`fr_fig3b` plots total taxes, total government expenditures, and the composition of government expenditures in France during much of the 18th century. 
- - - - -## Figure 4 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Military Spending in Britain and France" - name: fig4 ---- -# French military spending, 1685-1789, in 1726 livres -data4 = pd.read_excel('datasets/dette.xlsx', sheet_name='Militspe', usecols='D', skiprows=3, nrows=105, header=None).squeeze() -years = range(1685, 1790) - -plt.figure() -plt.plot(years, data4, '*-', linewidth=0.8) - -plt.plot(range(1689, 1791), data2.iloc[:, 4], linewidth=0.8) - -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().tick_params(labelsize=12) -plt.xlim([1689, 1790]) -plt.xlabel('*: France') -plt.ylabel('Millions of livres') -plt.ylim([0, 475]) - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig4.pdf', dpi=600) -``` - - -{numref}`fig4` plots total taxes, total government expenditures, and the composition of government expenditures in France during much of the 18th century. - -TO TEACH TOM: By staring at {numref}`fig4` carefully - - -## Figure 5 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Index of real per capital revenues, France" - name: fig5 ---- -# Read data from Excel file -data5 = pd.read_excel('datasets/dette.xlsx', sheet_name='Debt', usecols='K', skiprows=41, nrows=120, header=None) - -# Plot the data -plt.figure() -plt.plot(range(1726, 1846), data5.iloc[:, 0], linewidth=0.8) - -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_facecolor('white') -plt.gca().tick_params(labelsize=12) -plt.xlim([1726, 1845]) -plt.ylabel('1726 = 1', fontsize=12) - -plt.tight_layout() -plt.show() - -# Save the figure as a PDF -#plt.savefig('frfinfig5.pdf', dpi=600) -``` - -TO TEACH TOM: By staring at {numref}`fig5` carefully - -## Rise and Fall of the *Assignat* - - - - We have partitioned Figures~\ref{fig:fig7}, \ref{fig:fig8}, and \ref{fig:fig9} - into three periods, corresponding -to different monetary regimes or episodes. The three clouds of points in -Figure~\ref{fig:fig7} - depict different real balance-inflation relationships. Only the cloud for the -third period has the inverse relationship familiar to us now from twentieth-century -hyperinflations. The first period ends in the late summer of 1793, and is characterized -by growing real balances and moderate inflation. The second period begins and ends -with the Terror. It is marked by high real balances, around 2,500 millions, and -roughly stable prices. The fall of Robespierre in late July 1794 begins the third -of our episodes, in which real balances decline and prices rise rapidly. We interpret -these three episodes in terms of three separate theories about money: a ``backing'' -or ''real bills'' theory (the text is Adam Smith (1776)), -a legal restrictions theory (TOM: HERE PLEASE CITE -Keynes,1940, AS WELL AS Bryant/Wallace:1984 and Villamil:1988) -and a classical hyperinflation theory.% -```{note} -According to the empirical definition of hyperinflation adopted by {cite}`Cagan`, -beginning in the month that inflation exceeds 50 percent -per month and ending in the month before inflation drops below 50 percent per month -for at least a year, the *assignat* experienced a hyperinflation from May to December -1795. -``` -We view these -theories not as competitors but as alternative collections of ``if-then'' -statements about government note issues, each of which finds its conditions more -nearly met in one of these episodes than in the other two. 
- - - - - -## Figure 7 - - -## To Do for Zejin - -I want to tweak and consolidate the extra lines that Zejin drew on the beautiful **Figure 7**. - -I'd like to experiment in plotting the **six** extra lines all on one graph -- a pair of lines for each of our subsamples - - * one for the $y$ on $x$ regression line - * another for the $x$ on $y$ regression line - -I'd like the $y$ on $x$ and $x$ on $y$ lines to be in separate colors. - -Once we are satisfied with this new graph with its six additional lines, we can dispense with the other graphs that add one line at a time. - -Zejin, I can explain on zoom the lessons I want to convey with this. - - - -Just to recall, to compute the regression lines, Zejin wrote a function that use standard formulas -for a and b in a least squares regression y = a + b x + residual -- i.e., b is ratio of sample covariance of y,x to sample variance of x; while a is then computed from a = sample mean of y - \hat b *sample mean of x - -We could presumably tell students how to do this with a couple of numpy lines -I'd like to create three additional versions of the following figure. - -To remind you, we focused on three subperiods: - - -* subperiod 1: ("real bills period): January 1791 to July 1793 - -* subperiod 2: ("terror:): August 1793 - July 1794 - -* subperiod 3: ("classic Cagan hyperinflation): August 1794 - March 1796 - - -I can explain what this is designed to show. - - - -```{code-cell} ipython3 -def fit(x, y): - - b = np.cov(x, y)[0, 1] / np.var(x) - a = y.mean() - b * x.mean() - - return a, b -``` - -```{code-cell} ipython3 -# load data -caron = np.load('datasets/caron.npy') -nom_balances = np.load('datasets/nom_balances.npy') - -infl = np.concatenate(([np.nan], -np.log(caron[1:63, 1] / caron[0:62, 1]))) -bal = nom_balances[14:77, 1] * caron[:, 1] / 1000 -``` - -```{code-cell} ipython3 -# fit data - -# reg y on x for three periods -a1, b1 = fit(bal[1:31], infl[1:31]) -a2, b2 = fit(bal[31:44], infl[31:44]) -a3, b3 = fit(bal[44:63], infl[44:63]) - -# reg x on y for three periods -a1_rev, b1_rev = fit(infl[1:31], bal[1:31]) -a2_rev, b2_rev = fit(infl[31:44], bal[31:44]) -a3_rev, b3_rev = fit(infl[44:63], bal[44:63]) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7.pdf', dpi=600) -``` - - - -```{code-cell} ipython3 -# fit data - -# reg y on x for three periods -a1, b1 = fit(bal[1:31], infl[1:31]) -a2, b2 = fit(bal[31:44], infl[31:44]) -a3, b3 = fit(bal[44:63], infl[44:63]) - -# reg x on y for three periods -a1_rev, b1_rev = fit(infl[1:31], bal[1:31]) -a2_rev, b2_rev = fit(infl[31:44], bal[31:44]) -a3_rev, b3_rev = fit(infl[44:63], bal[44:63]) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[34:44], infl[34:44], '+', color='red', label='terror') - -# third subsample # Tom 
tinkered with subsample period -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7.pdf', dpi=600) -``` - - -

The above graph is Tom's experimental lab. We'll delete it eventually.

- -

Zejin: below is the graph with all six lines. The lines generated by regressing y on x have the same color as the corresponding data points, while the lines generated by regressing x on y are all in green.

- -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') -plt.plot(bal[1:31], a1 + bal[1:31] * b1, color='blue', linewidth=0.8) -plt.plot(a1_rev + b1_rev * infl[1:31], infl[1:31], color='green', linewidth=0.8) - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') -plt.plot(bal[31:44], a2 + bal[31:44] * b2, color='red', linewidth=0.8) -plt.plot(a2_rev + b2_rev * infl[31:44], infl[31:44], color='green', linewidth=0.8) - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') -plt.plot(bal[44:63], a3 + bal[44:63] * b3, color='orange', linewidth=0.8) -plt.plot(a3_rev + b3_rev * infl[44:63], infl[44:63], color='green', linewidth=0.8) - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() -#plt.savefig('frfinfig7.pdf', dpi=600) -``` - - - -

The graph below is Tom's version of the six lines in one graph. The lines generated by regressing y on x have the same color as the corresponding data points, while the lines generated by regressing x on y are all in green.

- -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') -plt.plot(bal[1:31], a1 + bal[1:31] * b1, color='blue', linewidth=0.8) -plt.plot(a1_rev + b1_rev * infl[1:31], infl[1:31], color='green', linewidth=0.8) - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') -plt.plot(bal[34:44], a2 + bal[34:44] * b2, color='red', linewidth=0.8) -plt.plot(a2_rev + b2_rev * infl[34:44], infl[34:44], color='green', linewidth=0.8) - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') -plt.plot(bal[44:63], a3 + bal[44:63] * b3, color='orange', linewidth=0.8) -plt.plot(a3_rev + b3_rev * infl[44:63], infl[44:63], color='green', linewidth=0.8) - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') -plt.plot(bal[1:31], a1 + bal[1:31] * b1, color='blue') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line1.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') -plt.plot(a1_rev + b1_rev * infl[1:31], infl[1:31], color='blue') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line1_rev.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') -plt.plot(bal[31:44], a2 + bal[31:44] * b2, color='red') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line2.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') -plt.plot(a2_rev + b2_rev * infl[31:44], infl[31:44], color='red') - -# third subsample 
-plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line2_rev.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') -plt.plot(bal[44:63], a3 + bal[44:63] * b3, color='orange') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line3.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') -plt.plot(a3_rev + b3_rev * infl[44:63], infl[44:63], color='orange') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line3_rev.pdf', dpi=600) -``` - - -## Figure 8 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Real balances of assignats (in gold and goods)" - name: fig8 ---- -# Read the data from Excel file -data7 = pd.read_excel('datasets/assignat.xlsx', sheet_name='Data', usecols='P:Q', skiprows=4, nrows=80, header=None) -data7a = pd.read_excel('datasets/assignat.xlsx', sheet_name='Data', usecols='L', skiprows=4, nrows=80, header=None) - -# Create the figure and plot -plt.figure() -h = plt.plot(pd.date_range(start='1789-11-01', periods=len(data7), freq='M'), (data7a.values * [1, 1]) * data7.values, linewidth=1.) -plt.setp(h[1], linestyle='--', color='red') - -plt.vlines([pd.Timestamp('1793-07-15'), pd.Timestamp('1793-07-15')], 0, 3000, linewidth=0.8, color='orange') -plt.vlines([pd.Timestamp('1794-07-15'), pd.Timestamp('1794-07-15')], 0, 3000, linewidth=0.8, color='purple') - -plt.ylim([0, 3000]) - -# Set properties of the plot -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_facecolor('white') -plt.gca().tick_params(labelsize=12) -plt.xlim(pd.Timestamp('1789-11-01'), pd.Timestamp('1796-06-01')) -plt.ylabel('millions of livres', fontsize=12) - -# Add text annotations -plt.text(pd.Timestamp('1793-09-01'), 200, 'Terror', fontsize=12) -plt.text(pd.Timestamp('1791-05-01'), 750, 'gold value', fontsize=12) -plt.text(pd.Timestamp('1794-10-01'), 2500, 'real value', fontsize=12) - - -plt.tight_layout() -plt.show() - -# Save the figure as a PDF -#plt.savefig('frfinfig8.pdf', dpi=600) -``` - -TO TEACH TOM: By staring at {numref}`fig8` carefully - - -## Figure 9 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Price Level and Price of Gold (log scale)" - name: fig9 ---- -# Create the figure and plot -plt.figure() -x = np.arange(1789 + 10/12, 1796 + 5/12, 1/12) -h, = plt.plot(x, 1. / data7.iloc[:, 0], linestyle='--') -h, = plt.plot(x, 1. 
/ data7.iloc[:, 1], color='r') - -# Set properties of the plot -plt.gca().tick_params(labelsize=12) -plt.yscale('log') -plt.xlim([1789 + 10/12, 1796 + 5/12]) -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# Add vertical lines -plt.axvline(x=1793 + 6.5/12, linestyle='-', linewidth=0.8, color='orange') -plt.axvline(x=1794 + 6.5/12, linestyle='-', linewidth=0.8, color='purple') - -# Add text -plt.text(1793.75, 120, 'Terror', fontsize=12) -plt.text(1795, 2.8, 'price level', fontsize=12) -plt.text(1794.9, 40, 'gold', fontsize=12) - - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig9.pdf', dpi=600) -``` - -TO TEACH TOM: By staring at {numref}`fig9` carefully - - -## Figure 11 - - - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Spending (blue) and Revenues (orange), (real values)" - name: fig11 ---- -# Read data from Excel file -data11 = pd.read_excel('datasets/assignat.xlsx', sheet_name='Budgets', usecols='J:K', skiprows=22, nrows=52, header=None) - -# Prepare the x-axis data -x_data = np.concatenate([ - np.arange(1791, 1794 + 8/12, 1/12), - np.arange(1794 + 9/12, 1795 + 3/12, 1/12) -]) - -# Remove NaN values from the data -data11_clean = data11.dropna() - -# Plot the data -plt.figure() -h = plt.plot(x_data, data11_clean.values[:, 0], linewidth=0.8) -h = plt.plot(x_data, data11_clean.values[:, 1], '--', linewidth=0.8) - - - -# Set plot properties -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_facecolor('white') -plt.gca().tick_params(axis='both', which='major', labelsize=12) -plt.xlim([1791, 1795 + 3/12]) -plt.xticks(np.arange(1791, 1796)) -plt.yticks(np.arange(0, 201, 20)) - -# Set the y-axis label -plt.ylabel('millions of livres', fontsize=12) - - - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig11.pdf', dpi=600) -``` -TO TEACH TOM: By staring at {numref}`fig11` carefully - - -## Figure 12 - - -```{code-cell} ipython3 -# Read data from Excel file -data12 = pd.read_excel('datasets/assignat.xlsx', sheet_name='seignor', usecols='F', skiprows=6, nrows=75, header=None).squeeze() - - -# Create a figure and plot the data -plt.figure() -plt.plot(pd.date_range(start='1790', periods=len(data12), freq='M'), data12, linewidth=0.8) - -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -plt.axhline(y=472.42/12, color='r', linestyle=':') -plt.xticks(ticks=pd.date_range(start='1790', end='1796', freq='AS'), labels=range(1790, 1797)) -plt.xlim(pd.Timestamp('1791'), pd.Timestamp('1796-02') + pd.DateOffset(months=2)) -plt.ylabel('millions of livres', fontsize=12) -plt.text(pd.Timestamp('1793-11'), 39.5, 'revenues in 1788', verticalalignment='top', fontsize=12) - - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig12.pdf', dpi=600) -``` - - -## Figure 13 - - -```{code-cell} ipython3 -# Read data from Excel file -data13 = pd.read_excel('datasets/assignat.xlsx', sheet_name='Exchge', usecols='P:T', skiprows=3, nrows=502, header=None) - -# Plot the last column of the data -plt.figure() -plt.plot(data13.iloc[:, -1], linewidth=0.8) - -# Set properties of the plot -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_xlim([1, len(data13)]) - -# Set x-ticks and x-tick labels -ttt = np.arange(1, len(data13) + 1) -plt.xticks(ttt[~np.isnan(data13.iloc[:, 0])], - ['Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'Jan', 'Feb', - 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 
'Sep']) - -# Add text to the plot -plt.text(1, 120, '1795', fontsize=12, ha='center') -plt.text(262, 120, '1796', fontsize=12, ha='center') - -# Draw a horizontal line and add text -plt.axhline(y=186.7, color='red', linestyle='-', linewidth=0.8) -plt.text(150, 190, 'silver parity', fontsize=12) - -# Add an annotation with an arrow -plt.annotate('end of the assignat', xy=(340, 172), xytext=(380, 160), - arrowprops=dict(facecolor='black', arrowstyle='->'), fontsize=12) - - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig13.pdf', dpi=600) -``` - - -## Figure 14 - - -```{code-cell} ipython3 -# figure 14 -data14 = pd.read_excel('datasets/assignat.xlsx', sheet_name='Post-95', usecols='I', skiprows=9, nrows=91, header=None).squeeze() -data14a = pd.read_excel('datasets/assignat.xlsx', sheet_name='Post-95', usecols='F', skiprows=100, nrows=151, header=None).squeeze() - -plt.figure() -h = plt.plot(data14, '*-', markersize=2, linewidth=0.8) -plt.plot(np.concatenate([np.full(data14.shape, np.nan), data14a]), linewidth=0.8) -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_xticks(range(20, 237, 36)) -plt.gca().set_xticklabels(range(1796, 1803)) -plt.xlabel('*: Before the 2/3 bankruptcy') -plt.ylabel('Francs') - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig14.pdf', dpi=600) -``` - - -## Figure 15 - - -```{code-cell} ipython3 -# figure 15 -data15 = pd.read_excel('datasets/assignat.xlsx', sheet_name='Post-95', usecols='N', skiprows=4, nrows=88, header=None).squeeze() - -plt.figure() -h = plt.plot(range(2, 90), data15, '*-', linewidth=0.8) -plt.setp(h, markersize=2) -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.text(47.5, 11.4, '17 brumaire', horizontalalignment='left', fontsize=12) -plt.text(49.5, 14.75, '19 brumaire', horizontalalignment='left', fontsize=12) -plt.text(15, -1, 'Vendémiaire 8', fontsize=12, horizontalalignment='center') -plt.text(45, -1, 'Brumaire', fontsize=12, horizontalalignment='center') -plt.text(75, -1, 'Frimaire', fontsize=12, horizontalalignment='center') -plt.ylim([0, 25]) -plt.xticks([], []) -plt.ylabel('Francs') - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig15.pdf', dpi=600) -``` - -```{code-cell} ipython3 - -``` - - -## Fiscal Situation and Response of National Assembly - - -In response to a motion by Catholic Bishop Talleyrand, -the National Assembly confiscated and nationalized Church lands. - -But the National Assembly was dominated by free market advocates, not socialists. - -The National Assembly intended to use earnings from Church lands to service its national debt. - -To do this, it began to implement a ''privatization plan'' that would let it service its debt while -not raising taxes. - -Their plan involved issuing paper notes called ''assignats'' that entitled bearers to use them to purchase state lands. - -These paper notes would be ''as good as silver coins'' in the sense that both were acceptable means of payment in exchange for those (formerly) church lands. - -Finance Minister Necker and the Constituants planned -to solve the privatization problem **and** the debt problem simultaneously -by creating a new currency. - -They devised a scheme to raise revenues by auctioning -the confiscated lands, thereby withdrawing paper notes issued on the security of -the lands sold by the government. - - This ''tax-backed money'' scheme propelled the National Assembly into the domain of monetary experimentation. 
- -Records of their debates show -how members of the Assembly marshaled theory and evidence to assess the likely -effects of their innovation. - -They quoted David Hume and Adam Smith and cited John -Law's System of 1720 and the American experiences with paper money fifteen years -earlier as examples of how paper money schemes can go awry. - - -### Necker's plan and how it was tweaked - -Necker's original plan embodied two components: a national bank and a new -financial instrument, the ''assignat''. - - -Necker's national -bank was patterned after the Bank of England. He proposed to transform the *Caisse d'Escompte* into a national bank by granting it a monopoly on issuing -notes and marketing government debt. The *Caisse* was a -discount bank founded in 1776 whose main function was to discount commercial bills -and issue convertible notes. Although independent of the government in principle, -it had occasionally been used as a source of loans. Its notes had been declared -inconvertible in August 1788, and by the time of Necker's proposal, its reserves -were exhausted. Necker's plan placed the National Estates (as the Church lands -became known after the addition of the royal demesne) at the center of the financial -picture: a ''Bank of France'' would issue a $5\%$ security mortgaged on the prospective -receipts from the modest sale of some 400 millions' worth of National Estates in -the years 1791 to 1793. -```{note} - Only 170 million was to be used initially -to cover the deficits of 1789 and 1790. -``` - - -By mid-1790, members of the National Assembly had agreed to sell the National -Estates and to use the proceeds to service the debt in a ``tax-backed money'' scheme -```{note} -Debt service costs absorbed - over 60\% of French government expenditures. -``` - -The government would issue securities with which it would reimburse debt. - -The securities -were acceptable as payment for National Estates purchased at auctions; once received -in payment, they were to be burned. - -```{note} -The appendix to {cite}`sargent_velde1995` describes the -auction rules in detail. -``` -The Estates available for sale were thought to be worth about 2,400 -million, while the exactable debt (essentially fixed-term loans, unpaid arrears, -and liquidated offices) stood at about 2,000 million. The value of the land was -sufficient to let the Assembly retire all of the exactable debt and thereby eliminate -the interest payments on it. After lengthy debates, in August 1790, the Assembly set the denomination -and interest rate structure of the debt. - - -```{note} Two distinct -aspects of monetary theory help in thinking about the assignat plan. First, a system -beginning with a commodity standard typically has room for a once-and-for-all emission -of (an unbacked) paper currency that can replace the commodity money without generating -inflation. \citet{Sargent/Wallace:1983} describe models with this property. That -commodity money systems are wasteful underlies Milton Friedman's (1960) TOM:ADD REFERENCE preference -for a fiat money regime over a commodity money. Second, in a small country on a -commodity money system that starts with restrictions on intermediation, those restrictions -can be relaxed by letting the government issue bank notes on the security of safe -private indebtedness, while leaving bank notes convertible into gold at par. See -Adam Smith and Sargent and Wallace (1982) for expressions of this idea. TOM: ADD REFERENCES HEREAND IN BIBTEX FILE. 
-``` - - -```{note} -The -National Assembly debated many now classic questions in monetary economics. Under -what conditions would money creation generate inflation, with what consequences -for business conditions? Distinctions were made between issue of money to pay off -debt, on one hand, and monetization of deficits, on the other. Would *assignats* be akin -to notes emitted under a real bills regime, and cause loss of specie, or would -they circulate alongside specie, thus increasing the money stock? Would inflation -affect real wages? How would it impact foreign trade, competitiveness of French -industry and agriculture, balance of trade, foreign exchange? -``` diff --git a/lectures/french_rev.md b/lectures/french_rev.md index 780f4317..1f5aec0d 100644 --- a/lectures/french_rev.md +++ b/lectures/french_rev.md @@ -1,1031 +1,996 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.16.1 -kernelspec: - display_name: Python 3 (ipykernel) - language: python - name: python3 ---- - - -# Inflation During French Revolution - - -## Overview - -This lecture describes some monetary and fiscal features of the French Revolution -described by {cite}`sargent_velde1995`. - -We use matplotlib to replicate several of the graphs that they used to present salient patterns. - - - -## Fiscal Situation and Response of National Assembly - - -In response to a motion by Catholic Bishop Talleyrand, -the National Assembly confiscated and nationalized Church lands. - -But the National Assembly was dominated by free market advocates, not socialists. - -The National Assembly intended to use earnings from Church lands to service its national debt. - -To do this, it began to implement a ''privatization plan'' that would let it service its debt while -not raising taxes. - -Their plan involved issuing paper notes called ''assignats'' that entitled bearers to use them to purchase state lands. - -These paper notes would be ''as good as silver coins'' in the sense that both were acceptable means of payment in exchange for those (formerly) church lands. - -Finance Minister Necker and the Constituants planned -to solve the privatization problem **and** the debt problem simultaneously -by creating a new currency. - -They devised a scheme to raise revenues by auctioning -the confiscated lands, thereby withdrawing paper notes issued on the security of -the lands sold by the government. - - This ''tax-backed money'' scheme propelled the National Assembly into the domain of monetary experimentation. - -Records of their debates show -how members of the Assembly marshaled theory and evidence to assess the likely -effects of their innovation. - -They quoted David Hume and Adam Smith and cited John -Law's System of 1720 and the American experiences with paper money fifteen years -earlier as examples of how paper money schemes can go awry. - - -### Necker's plan and how it was tweaked - -Necker's original plan embodied two components: a national bank and a new -financial instrument, the ''assignat''. - - -Necker's national -bank was patterned after the Bank of England. He proposed to transform the *Caisse d'Escompte* into a national bank by granting it a monopoly on issuing -notes and marketing government debt. The *Caisse* was a -discount bank founded in 1776 whose main function was to discount commercial bills -and issue convertible notes. Although independent of the government in principle, -it had occasionally been used as a source of loans. 
Its notes had been declared -inconvertible in August 1788, and by the time of Necker's proposal, its reserves -were exhausted. Necker's plan placed the National Estates (as the Church lands -became known after the addition of the royal demesne) at the center of the financial -picture: a ''Bank of France'' would issue a $5\%$ security mortgaged on the prospective -receipts from the modest sale of some 400 millions' worth of National Estates in -the years 1791 to 1793. -```{note} - Only 170 million was to be used initially -to cover the deficits of 1789 and 1790. -``` - - -By mid-1790, members of the National Assembly had agreed to sell the National -Estates and to use the proceeds to service the debt in a ``tax-backed money'' scheme -```{note} -Debt service costs absorbed - over 60\% of French government expenditures. -``` - -The government would issue securities with which it would reimburse debt. - -The securities -were acceptable as payment for National Estates purchased at auctions; once received -in payment, they were to be burned. - -```{note} -The appendix to {cite}`sargent_velde1995` describes the -auction rules in detail. -``` -The Estates available for sale were thought to be worth about 2,400 -million, while the exactable debt (essentially fixed-term loans, unpaid arrears, -and liquidated offices) stood at about 2,000 million. The value of the land was -sufficient to let the Assembly retire all of the exactable debt and thereby eliminate -the interest payments on it. After lengthy debates, in August 1790, the Assembly set the denomination -and interest rate structure of the debt. - - -```{note} Two distinct -aspects of monetary theory help in thinking about the assignat plan. First, a system -beginning with a commodity standard typically has room for a once-and-for-all emission -of (an unbacked) paper currency that can replace the commodity money without generating -inflation. \citet{Sargent/Wallace:1983} describe models with this property. That -commodity money systems are wasteful underlies Milton Friedman's (1960) TOM:ADD REFERENCE preference -for a fiat money regime over a commodity money. Second, in a small country on a -commodity money system that starts with restrictions on intermediation, those restrictions -can be relaxed by letting the government issue bank notes on the security of safe -private indebtedness, while leaving bank notes convertible into gold at par. See -Adam Smith and Sargent and Wallace (1982) for expressions of this idea. TOM: ADD REFERENCES HEREAND IN BIBTEX FILE. -``` - - -```{note} -The -National Assembly debated many now classic questions in monetary economics. Under -what conditions would money creation generate inflation, with what consequences -for business conditions? Distinctions were made between issue of money to pay off -debt, on one hand, and monetization of deficits, on the other. Would *assignats* be akin -to notes emitted under a real bills regime, and cause loss of specie, or would -they circulate alongside specie, thus increasing the money stock? Would inflation -affect real wages? How would it impact foreign trade, competitiveness of French -industry and agriculture, balance of trade, foreign exchange? 
-``` - -## Data Sources - -This notebook uses data from three spreadsheets: - - * datasets/fig_3.ods - * datasets/dette.xlsx - * datasets/assignat.xlsx - -```{code-cell} ipython3 -import numpy as np -import pandas as pd -import matplotlib.pyplot as plt -plt.rcParams.update({'font.size': 12}) -``` - - -## Figure 1 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Ratio of debt service to taxes, Britain and France" - name: fig1 ---- - -# Read the data from the Excel file -data1 = pd.read_excel('datasets/dette.xlsx', sheet_name='Debt', usecols='R:S', skiprows=5, nrows=99, header=None) -data1a = pd.read_excel('datasets/dette.xlsx', sheet_name='Debt', usecols='P', skiprows=89, nrows=15, header=None) - -# Plot the data -plt.figure() -plt.plot(range(1690, 1789), 100 * data1.iloc[:, 1], linewidth=0.8) - -date = np.arange(1690, 1789) -index = (date < 1774) & (data1.iloc[:, 0] > 0) -plt.plot(date[index], 100 * data1[index].iloc[:, 0], '*:', color='r', linewidth=0.8) - -# Plot the additional data -plt.plot(range(1774, 1789), 100 * data1a, '*:', color='orange') - -# Note about the data -# The French data before 1720 don't match up with the published version -# Set the plot properties -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_facecolor('white') -plt.gca().set_xlim([1688, 1788]) -plt.ylabel('% of Taxes') - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig1.pdf', dpi=600) -#plt.savefig('frfinfig1.jpg', dpi=600) -``` - - -TO TEACH TOM: By staring at {numref}`fig1` carefully - - -## Figure 2 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Government Expenditures and Tax Revenues in Britain" - name: fig2 ---- - -# Read the data from Excel file -data2 = pd.read_excel('datasets/dette.xlsx', sheet_name='Militspe', usecols='M:X', skiprows=7, nrows=102, header=None) - -# Plot the data -plt.figure() -plt.plot(range(1689, 1791), data2.iloc[:, 5], linewidth=0.8) -plt.plot(range(1689, 1791), data2.iloc[:, 11], linewidth=0.8, color='red') -plt.plot(range(1689, 1791), data2.iloc[:, 9], linewidth=0.8, color='orange') -plt.plot(range(1689, 1791), data2.iloc[:, 8], 'o-', markerfacecolor='none', linewidth=0.8, color='purple') - -# Customize the plot -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().tick_params(labelsize=12) -plt.xlim([1689, 1790]) -plt.ylabel('millions of pounds', fontsize=12) - -# Add text annotations -plt.text(1765, 1.5, 'civil', fontsize=10) -plt.text(1760, 4.2, 'civil plus debt service', fontsize=10) -plt.text(1708, 15.5, 'total govt spending', fontsize=10) -plt.text(1759, 7.3, 'revenues', fontsize=10) - - -plt.tight_layout() -plt.show() - -# Save the figure as a PDF -#plt.savefig('frfinfig2.pdf', dpi=600) -``` - - -## Figure 3 - - - - -```{code-cell} ipython3 -# Read the data from the Excel file -data1 = pd.read_excel('datasets/fig_3.xlsx', sheet_name='Sheet1', usecols='C:F', skiprows=5, nrows=30, header=None) - -data1.replace(0, np.nan, inplace=True) -``` - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Government Spending and Tax Revenues in France" - name: fr_fig3 ---- -# Plot the data -plt.figure() - -plt.plot(range(1759, 1789, 1), data1.iloc[:, 0], '-x', linewidth=0.8) -plt.plot(range(1759, 1789, 1), data1.iloc[:, 1], '--*', linewidth=0.8) -plt.plot(range(1759, 1789, 1), data1.iloc[:, 2], '-o', linewidth=0.8, markerfacecolor='none') -plt.plot(range(1759, 1789, 1), data1.iloc[:, 3], '-*', linewidth=0.8) - -plt.text(1775, 610, 
'total spending', fontsize=10) -plt.text(1773, 325, 'military', fontsize=10) -plt.text(1773, 220, 'civil plus debt service', fontsize=10) -plt.text(1773, 80, 'debt service', fontsize=10) -plt.text(1785, 500, 'revenues', fontsize=10) - - - -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.ylim([0, 700]) -plt.ylabel('millions of livres') - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig3.jpg', dpi=600) -``` - - -TO TEACH TOM: By staring at {numref}`fr_fig3` carefully - -```{code-cell} ipython3 -# Plot the data -plt.figure() - -plt.plot(np.arange(1759, 1789, 1)[~np.isnan(data1.iloc[:, 0])], data1.iloc[:, 0][~np.isnan(data1.iloc[:, 0])], '-x', linewidth=0.8) -plt.plot(np.arange(1759, 1789, 1)[~np.isnan(data1.iloc[:, 1])], data1.iloc[:, 1][~np.isnan(data1.iloc[:, 1])], '--*', linewidth=0.8) -plt.plot(np.arange(1759, 1789, 1)[~np.isnan(data1.iloc[:, 2])], data1.iloc[:, 2][~np.isnan(data1.iloc[:, 2])], '-o', linewidth=0.8, markerfacecolor='none') -plt.plot(np.arange(1759, 1789, 1)[~np.isnan(data1.iloc[:, 3])], data1.iloc[:, 3][~np.isnan(data1.iloc[:, 3])], '-*', linewidth=0.8) - -plt.text(1775, 610, 'total spending', fontsize=10) -plt.text(1773, 325, 'military', fontsize=10) -plt.text(1773, 220, 'civil plus debt service', fontsize=10) -plt.text(1773, 80, 'debt service', fontsize=10) -plt.text(1785, 500, 'revenues', fontsize=10) - - -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.ylim([0, 700]) -plt.ylabel('millions of livres') - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig3_ignore_nan.jpg', dpi=600) -``` - - -## Figure 4 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Military Spending in Britain and France" - name: fig4 ---- -# French military spending, 1685-1789, in 1726 livres -data4 = pd.read_excel('datasets/dette.xlsx', sheet_name='Militspe', usecols='D', skiprows=3, nrows=105, header=None).squeeze() -years = range(1685, 1790) - -plt.figure() -plt.plot(years, data4, '*-', linewidth=0.8) - -plt.plot(range(1689, 1791), data2.iloc[:, 4], linewidth=0.8) - -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().tick_params(labelsize=12) -plt.xlim([1689, 1790]) -plt.xlabel('*: France') -plt.ylabel('Millions of livres') -plt.ylim([0, 475]) - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig4.pdf', dpi=600) -``` - -TO TEACH TOM: By staring at {numref}`fig4` carefully - -## Figure 5 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Index of real per capital revenues, France" - name: fig5 ---- -# Read data from Excel file -data5 = pd.read_excel('datasets/dette.xlsx', sheet_name='Debt', usecols='K', skiprows=41, nrows=120, header=None) - -# Plot the data -plt.figure() -plt.plot(range(1726, 1846), data5.iloc[:, 0], linewidth=0.8) - -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_facecolor('white') -plt.gca().tick_params(labelsize=12) -plt.xlim([1726, 1845]) -plt.ylabel('1726 = 1', fontsize=12) - -plt.tight_layout() -plt.show() - -# Save the figure as a PDF -#plt.savefig('frfinfig5.pdf', dpi=600) -``` - -TO TEACH TOM: By staring at {numref}`fig5` carefully - -## Rise and Fall of the *Assignat* - - - - We have partitioned Figures~\ref{fig:fig7}, \ref{fig:fig8}, and \ref{fig:fig9} - into three periods, corresponding -to different monetary regimes or episodes. 
The three clouds of points in -Figure~\ref{fig:fig7} - depict different real balance-inflation relationships. Only the cloud for the -third period has the inverse relationship familiar to us now from twentieth-century -hyperinflations. The first period ends in the late summer of 1793, and is characterized -by growing real balances and moderate inflation. The second period begins and ends -with the Terror. It is marked by high real balances, around 2,500 millions, and -roughly stable prices. The fall of Robespierre in late July 1794 begins the third -of our episodes, in which real balances decline and prices rise rapidly. We interpret -these three episodes in terms of three separate theories about money: a ``backing'' -or ''real bills'' theory (the text is Adam Smith (1776)), -a legal restrictions theory (TOM: HERE PLEASE CITE -Keynes,1940, AS WELL AS Bryant/Wallace:1984 and Villamil:1988) -and a classical hyperinflation theory.% -```{note} -According to the empirical definition of hyperinflation adopted by {cite}`Cagan`, -beginning in the month that inflation exceeds 50 percent -per month and ending in the month before inflation drops below 50 percent per month -for at least a year, the *assignat* experienced a hyperinflation from May to December -1795. -``` -We view these -theories not as competitors but as alternative collections of ``if-then'' -statements about government note issues, each of which finds its conditions more -nearly met in one of these episodes than in the other two. - - - - - -## Figure 7 - - -## To Do for Zejin - -I want to tweak and consolidate the extra lines that Zejin drew on the beautiful **Figure 7**. - -I'd like to experiment in plotting the **six** extra lines all on one graph -- a pair of lines for each of our subsamples - - * one for the $y$ on $x$ regression line - * another for the $x$ on $y$ regression line - -I'd like the $y$ on $x$ and $x$ on $y$ lines to be in separate colors. - -Once we are satisfied with this new graph with its six additional lines, we can dispense with the other graphs that add one line at a time. - -Zejin, I can explain on zoom the lessons I want to convey with this. - - - -Just to recall, to compute the regression lines, Zejin wrote a function that use standard formulas -for a and b in a least squares regression y = a + b x + residual -- i.e., b is ratio of sample covariance of y,x to sample variance of x; while a is then computed from a = sample mean of y - \hat b *sample mean of x - -We could presumably tell students how to do this with a couple of numpy lines -I'd like to create three additional versions of the following figure. - -To remind you, we focused on three subperiods: - - -* subperiod 1: ("real bills period): January 1791 to July 1793 - -* subperiod 2: ("terror:): August 1793 - July 1794 - -* subperiod 3: ("classic Cagan hyperinflation): August 1794 - March 1796 - - -I can explain what this is designed to show. 
- - - -```{code-cell} ipython3 -def fit(x, y): - - b = np.cov(x, y)[0, 1] / np.var(x) - a = y.mean() - b * x.mean() - - return a, b -``` - -```{code-cell} ipython3 -# load data -caron = np.load('datasets/caron.npy') -nom_balances = np.load('datasets/nom_balances.npy') - -infl = np.concatenate(([np.nan], -np.log(caron[1:63, 1] / caron[0:62, 1]))) -bal = nom_balances[14:77, 1] * caron[:, 1] / 1000 -``` - -```{code-cell} ipython3 -# fit data - -# reg y on x for three periods -a1, b1 = fit(bal[1:31], infl[1:31]) -a2, b2 = fit(bal[31:44], infl[31:44]) -a3, b3 = fit(bal[44:63], infl[44:63]) - -# reg x on y for three periods -a1_rev, b1_rev = fit(infl[1:31], bal[1:31]) -a2_rev, b2_rev = fit(infl[31:44], bal[31:44]) -a3_rev, b3_rev = fit(infl[44:63], bal[44:63]) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7.pdf', dpi=600) -``` - - - -```{code-cell} ipython3 -# fit data - -# reg y on x for three periods -a1, b1 = fit(bal[1:31], infl[1:31]) -a2, b2 = fit(bal[31:44], infl[31:44]) -a3, b3 = fit(bal[44:63], infl[44:63]) - -# reg x on y for three periods -a1_rev, b1_rev = fit(infl[1:31], bal[1:31]) -a2_rev, b2_rev = fit(infl[31:44], bal[31:44]) -a3_rev, b3_rev = fit(infl[44:63], bal[44:63]) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[34:44], infl[34:44], '+', color='red', label='terror') - -# third subsample # Tom tinkered with subsample period -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7.pdf', dpi=600) -``` - - -

The above graph is Tom's experimental lab. We'll delete it eventually.

- -

Zejin: below is the grapth with six lines in one graph. The lines generated by regressing y on x have the same color as the corresponding data points, while the lines generated by regressing x on y are all in green.

- -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') -plt.plot(bal[1:31], a1 + bal[1:31] * b1, color='blue', linewidth=0.8) -plt.plot(a1_rev + b1_rev * infl[1:31], infl[1:31], color='green', linewidth=0.8) - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') -plt.plot(bal[31:44], a2 + bal[31:44] * b2, color='red', linewidth=0.8) -plt.plot(a2_rev + b2_rev * infl[31:44], infl[31:44], color='green', linewidth=0.8) - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') -plt.plot(bal[44:63], a3 + bal[44:63] * b3, color='orange', linewidth=0.8) -plt.plot(a3_rev + b3_rev * infl[44:63], infl[44:63], color='green', linewidth=0.8) - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() -#plt.savefig('frfinfig7.pdf', dpi=600) -``` - - - -

The graph below is Tom's version of the six lines in one graph. The lines generated by regressing y on x have the same color as the corresponding data points, while the lines generated by regressing x on y are all in green.

- -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') -plt.plot(bal[1:31], a1 + bal[1:31] * b1, color='blue', linewidth=0.8) -plt.plot(a1_rev + b1_rev * infl[1:31], infl[1:31], color='green', linewidth=0.8) - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') -plt.plot(bal[34:44], a2 + bal[34:44] * b2, color='red', linewidth=0.8) -plt.plot(a2_rev + b2_rev * infl[34:44], infl[34:44], color='green', linewidth=0.8) - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') -plt.plot(bal[44:63], a3 + bal[44:63] * b3, color='orange', linewidth=0.8) -plt.plot(a3_rev + b3_rev * infl[44:63], infl[44:63], color='green', linewidth=0.8) - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') -plt.plot(bal[1:31], a1 + bal[1:31] * b1, color='blue') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line1.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') -plt.plot(a1_rev + b1_rev * infl[1:31], infl[1:31], color='blue') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line1_rev.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') -plt.plot(bal[31:44], a2 + bal[31:44] * b2, color='red') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line2.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') -plt.plot(a2_rev + b2_rev * infl[31:44], infl[31:44], color='red') - -# third subsample 
-plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line2_rev.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') -plt.plot(bal[44:63], a3 + bal[44:63] * b3, color='orange') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line3.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') -plt.plot(a3_rev + b3_rev * infl[44:63], infl[44:63], color='orange') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line3_rev.pdf', dpi=600) -``` - - -## Figure 8 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Real balances of assignats (in gold and goods)" - name: fig8 ---- -# Read the data from Excel file -data7 = pd.read_excel('datasets/assignat.xlsx', sheet_name='Data', usecols='P:Q', skiprows=4, nrows=80, header=None) -data7a = pd.read_excel('datasets/assignat.xlsx', sheet_name='Data', usecols='L', skiprows=4, nrows=80, header=None) - -# Create the figure and plot -plt.figure() -h = plt.plot(pd.date_range(start='1789-11-01', periods=len(data7), freq='M'), (data7a.values * [1, 1]) * data7.values, linewidth=1.) -plt.setp(h[1], linestyle='--', color='red') - -plt.vlines([pd.Timestamp('1793-07-15'), pd.Timestamp('1793-07-15')], 0, 3000, linewidth=0.8, color='orange') -plt.vlines([pd.Timestamp('1794-07-15'), pd.Timestamp('1794-07-15')], 0, 3000, linewidth=0.8, color='purple') - -plt.ylim([0, 3000]) - -# Set properties of the plot -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_facecolor('white') -plt.gca().tick_params(labelsize=12) -plt.xlim(pd.Timestamp('1789-11-01'), pd.Timestamp('1796-06-01')) -plt.ylabel('millions of livres', fontsize=12) - -# Add text annotations -plt.text(pd.Timestamp('1793-09-01'), 200, 'Terror', fontsize=12) -plt.text(pd.Timestamp('1791-05-01'), 750, 'gold value', fontsize=12) -plt.text(pd.Timestamp('1794-10-01'), 2500, 'real value', fontsize=12) - - -plt.tight_layout() -plt.show() - -# Save the figure as a PDF -#plt.savefig('frfinfig8.pdf', dpi=600) -``` - -TO TEACH TOM: By staring at {numref}`fig8` carefully - - -## Figure 9 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Price Level and Price of Gold (log scale)" - name: fig9 ---- -# Create the figure and plot -plt.figure() -x = np.arange(1789 + 10/12, 1796 + 5/12, 1/12) -h, = plt.plot(x, 1. / data7.iloc[:, 0], linestyle='--') -h, = plt.plot(x, 1. 
/ data7.iloc[:, 1], color='r') - -# Set properties of the plot -plt.gca().tick_params(labelsize=12) -plt.yscale('log') -plt.xlim([1789 + 10/12, 1796 + 5/12]) -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# Add vertical lines -plt.axvline(x=1793 + 6.5/12, linestyle='-', linewidth=0.8, color='orange') -plt.axvline(x=1794 + 6.5/12, linestyle='-', linewidth=0.8, color='purple') - -# Add text -plt.text(1793.75, 120, 'Terror', fontsize=12) -plt.text(1795, 2.8, 'price level', fontsize=12) -plt.text(1794.9, 40, 'gold', fontsize=12) - - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig9.pdf', dpi=600) -``` - -TO TEACH TOM: By staring at {numref}`fig9` carefully - - -## Figure 11 - - - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Spending (blue) and Revenues (orange), (real values)" - name: fig11 ---- -# Read data from Excel file -data11 = pd.read_excel('datasets/assignat.xlsx', sheet_name='Budgets', usecols='J:K', skiprows=22, nrows=52, header=None) - -# Prepare the x-axis data -x_data = np.concatenate([ - np.arange(1791, 1794 + 8/12, 1/12), - np.arange(1794 + 9/12, 1795 + 3/12, 1/12) -]) - -# Remove NaN values from the data -data11_clean = data11.dropna() - -# Plot the data -plt.figure() -h = plt.plot(x_data, data11_clean.values[:, 0], linewidth=0.8) -h = plt.plot(x_data, data11_clean.values[:, 1], '--', linewidth=0.8) - - - -# Set plot properties -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_facecolor('white') -plt.gca().tick_params(axis='both', which='major', labelsize=12) -plt.xlim([1791, 1795 + 3/12]) -plt.xticks(np.arange(1791, 1796)) -plt.yticks(np.arange(0, 201, 20)) - -# Set the y-axis label -plt.ylabel('millions of livres', fontsize=12) - - - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig11.pdf', dpi=600) -``` -TO TEACH TOM: By staring at {numref}`fig11` carefully - - -## Figure 12 - - -```{code-cell} ipython3 -# Read data from Excel file -data12 = pd.read_excel('datasets/assignat.xlsx', sheet_name='seignor', usecols='F', skiprows=6, nrows=75, header=None).squeeze() - - -# Create a figure and plot the data -plt.figure() -plt.plot(pd.date_range(start='1790', periods=len(data12), freq='M'), data12, linewidth=0.8) - -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -plt.axhline(y=472.42/12, color='r', linestyle=':') -plt.xticks(ticks=pd.date_range(start='1790', end='1796', freq='AS'), labels=range(1790, 1797)) -plt.xlim(pd.Timestamp('1791'), pd.Timestamp('1796-02') + pd.DateOffset(months=2)) -plt.ylabel('millions of livres', fontsize=12) -plt.text(pd.Timestamp('1793-11'), 39.5, 'revenues in 1788', verticalalignment='top', fontsize=12) - - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig12.pdf', dpi=600) -``` - - -## Figure 13 - - -```{code-cell} ipython3 -# Read data from Excel file -data13 = pd.read_excel('datasets/assignat.xlsx', sheet_name='Exchge', usecols='P:T', skiprows=3, nrows=502, header=None) - -# Plot the last column of the data -plt.figure() -plt.plot(data13.iloc[:, -1], linewidth=0.8) - -# Set properties of the plot -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_xlim([1, len(data13)]) - -# Set x-ticks and x-tick labels -ttt = np.arange(1, len(data13) + 1) -plt.xticks(ttt[~np.isnan(data13.iloc[:, 0])], - ['Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'Jan', 'Feb', - 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 
'Sep']) - -# Add text to the plot -plt.text(1, 120, '1795', fontsize=12, ha='center') -plt.text(262, 120, '1796', fontsize=12, ha='center') - -# Draw a horizontal line and add text -plt.axhline(y=186.7, color='red', linestyle='-', linewidth=0.8) -plt.text(150, 190, 'silver parity', fontsize=12) - -# Add an annotation with an arrow -plt.annotate('end of the assignat', xy=(340, 172), xytext=(380, 160), - arrowprops=dict(facecolor='black', arrowstyle='->'), fontsize=12) - - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig13.pdf', dpi=600) -``` - - -## Figure 14 - - -```{code-cell} ipython3 -# figure 14 -data14 = pd.read_excel('datasets/assignat.xlsx', sheet_name='Post-95', usecols='I', skiprows=9, nrows=91, header=None).squeeze() -data14a = pd.read_excel('datasets/assignat.xlsx', sheet_name='Post-95', usecols='F', skiprows=100, nrows=151, header=None).squeeze() - -plt.figure() -h = plt.plot(data14, '*-', markersize=2, linewidth=0.8) -plt.plot(np.concatenate([np.full(data14.shape, np.nan), data14a]), linewidth=0.8) -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_xticks(range(20, 237, 36)) -plt.gca().set_xticklabels(range(1796, 1803)) -plt.xlabel('*: Before the 2/3 bankruptcy') -plt.ylabel('Francs') - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig14.pdf', dpi=600) -``` - - -## Figure 15 - - -```{code-cell} ipython3 -# figure 15 -data15 = pd.read_excel('datasets/assignat.xlsx', sheet_name='Post-95', usecols='N', skiprows=4, nrows=88, header=None).squeeze() - -plt.figure() -h = plt.plot(range(2, 90), data15, '*-', linewidth=0.8) -plt.setp(h, markersize=2) -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.text(47.5, 11.4, '17 brumaire', horizontalalignment='left', fontsize=12) -plt.text(49.5, 14.75, '19 brumaire', horizontalalignment='left', fontsize=12) -plt.text(15, -1, 'Vendémiaire 8', fontsize=12, horizontalalignment='center') -plt.text(45, -1, 'Brumaire', fontsize=12, horizontalalignment='center') -plt.text(75, -1, 'Frimaire', fontsize=12, horizontalalignment='center') -plt.ylim([0, 25]) -plt.xticks([], []) -plt.ylabel('Francs') - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig15.pdf', dpi=600) -``` - -```{code-cell} ipython3 - -``` +--- +jupytext: + text_representation: + extension: .md + format_name: myst + format_version: 0.13 + jupytext_version: 1.16.2 +kernelspec: + display_name: Python 3 (ipykernel) + language: python + name: python3 +--- + +# Inflation During French Revolution + + +## Overview + +This lecture describes some of the monetary and fiscal features of the French Revolution (1789-1799) described by {cite}`sargent_velde1995`. + +To finance public expenditures and service its debts, +the French government embarked on policy experiments. + +The authors of these experiments had in mind theories about how government monetary and fiscal policies affected economic outcomes. + +Some of those theories about monetary and fiscal policies still interest us today. 
+
+* a **tax-smoothing** model like Robert Barro's {cite}`Barro1979`
+
+  * this normative (i.e., prescriptive) model advises a government to finance temporary war-time surges in expenditures mostly by issuing government debt, raising taxes by just enough to service the additional debt issued during the war; then, after the war, to roll over whatever debt the government had accumulated during the war; and to increase taxes after the war permanently by just enough to finance interest payments on that post-war government debt
+
+* **unpleasant monetarist arithmetic** like that described in this quantecon lecture {doc}`unpleasant`
+
+  * mathematics involving compound interest governed French government debt dynamics in the decades preceding 1789; according to leading historians, that arithmetic set the stage for the French Revolution (a small numerical sketch of this arithmetic appears just after this list)
+
+* a *real bills* theory of the effects of government open market operations in which the government *backs* new issues of paper money with government holdings of valuable real property or financial assets that holders of money can purchase from the government in exchange for their money
+
+  * The Revolutionaries learned about this theory from Adam Smith's 1776 book *The Wealth of Nations* {cite}`smith2010wealth` and other contemporary sources
+
+  * It shaped how the Revolutionaries issued a paper money called **assignats** from 1789 to 1791
+
+* a classical **gold** or **silver standard**
+
+  * Napoleon Bonaparte became head of the French government in 1799. He used this theory to guide his monetary and fiscal policies
+
+* a classical **inflation-tax** theory of inflation in which Philip Cagan's ({cite}`Cagan`) demand for money studied in this lecture {doc}`cagan_ree` is a key component
+
+  * This theory helps explain French price level and money supply data from 1794 to 1797
+
+* a **legal restrictions** or **financial repression** theory of the demand for real balances
+
+  * The Twelve Members comprising the Committee of Public Safety who administered the Terror from June 1793 to July 1794 used this theory to shape their monetary policy
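+To fix ideas about the compound-interest arithmetic mentioned in the list above, here is a minimal sketch that iterates on a government budget constraint $B_{t+1} = (1+r) B_t + D_t$, where $B_t$ is government debt, $r$ is the interest rate, and $D_t$ is the primary deficit (non-interest spending minus taxes). All of the numbers are illustrative assumptions, not estimates taken from {cite}`sargent_velde1995`.
+
+```{code-cell} ipython3
+# Illustrative compound-interest debt dynamics (made-up numbers)
+r = 0.05     # assumed interest rate on government debt
+B = 1.0      # initial stock of debt
+T = 0.5      # tax revenues, held fixed
+D = 0.05     # primary deficit, held fixed
+
+for t in range(1, 31):
+    B = (1 + r) * B + D    # interest compounds on top of new borrowing
+    if t % 10 == 0:
+        print(f"year {t:2d}: debt = {B:5.2f}, "
+              f"debt service / revenues = {r * B / T:.0%}")
+```
+
+With spending persistently above revenues, interest compounds and debt service eventually absorbs a large share of tax revenues; this is the mechanism behind the rising debt-service burdens plotted below.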
+
+We use matplotlib to replicate several of the graphs with which {cite}`sargent_velde1995` portrayed outcomes of these experiments.
+
+## Data Sources
+
+This lecture uses data from three spreadsheets assembled by {cite}`sargent_velde1995`:
+
+ * [datasets/fig_3.xlsx](https://github.com/QuantEcon/lecture-python-intro/blob/main/lectures/datasets/fig_3.xlsx)
+ * [datasets/dette.xlsx](https://github.com/QuantEcon/lecture-python-intro/blob/main/lectures/datasets/dette.xlsx)
+ * [datasets/assignat.xlsx](https://github.com/QuantEcon/lecture-python-intro/blob/main/lectures/datasets/assignat.xlsx)
+
+```{code-cell} ipython3
+import numpy as np
+import pandas as pd
+import matplotlib.pyplot as plt
+plt.rcParams.update({'font.size': 12})
+
+base_url = 'https://github.com/QuantEcon/lecture-python-intro/raw/'\
+           + 'main/lectures/datasets/'
+
+fig_3_url = f'{base_url}fig_3.xlsx'
+dette_url = f'{base_url}dette.xlsx'
+assignat_url = f'{base_url}assignat.xlsx'
+```
+
+## Government Expenditures and Taxes Collected
+
+We'll start by using `matplotlib` to construct several graphs that will provide important historical context.
+
+These graphs are versions of ones that appear in {cite}`sargent_velde1995`.
+
+These graphs show that during the 18th century
+
+ * government expenditures in France and Great Britain both surged during four big wars, and by comparable amounts
+ * In Britain, tax revenues were approximately equal to government expenditures during peacetime, but were substantially less than government expenditures during wars
+ * In France, even in peacetime, tax revenues were substantially less than government expenditures
+
+```{code-cell} ipython3
+---
+mystnb:
+  figure:
+    caption: "Military Spending in Britain and France"
+    name: fr_fig4
+---
+# Read the data from Excel file
+data2 = pd.read_excel(dette_url,
+                      sheet_name='Militspe', usecols='M:X',
+                      skiprows=7, nrows=102, header=None)
+
+# French military spending, 1685-1789, in 1726 livres
+data4 = pd.read_excel(dette_url,
+                      sheet_name='Militspe', usecols='D',
+                      skiprows=3, nrows=105, header=None).squeeze()
+
+years = range(1685, 1790)
+
+plt.figure()
+plt.plot(years, data4, '*-', linewidth=0.8)
+
+plt.plot(range(1689, 1791), data2.iloc[:, 4], linewidth=0.8)
+
+plt.gca().spines['top'].set_visible(False)
+plt.gca().spines['right'].set_visible(False)
+plt.gca().tick_params(labelsize=12)
+plt.xlim([1689, 1790])
+plt.xlabel('*: France')
+plt.ylabel('Millions of livres')
+plt.ylim([0, 475])
+
+plt.tight_layout()
+plt.show()
+```
+
+During the 18th century, Britain and France fought four large wars.
+
+Britain won the first three wars and lost the fourth.
+
+Each of those wars produced surges in both countries' government expenditures that each country somehow had to finance.
+
+Figure {numref}`fr_fig4` shows surges in military expenditures in France (in blue) and Great Britain during those four wars.
+
+A remarkable aspect of Figure {numref}`fr_fig4` is that despite having a population less than half of France's, Britain was able to finance military expenses of about the same amounts as France's.
+
+This testifies to Britain's having created state institutions that could sustain high tax collections, government spending, and government borrowing. See {cite}`north1989`.
+
+```{code-cell} ipython3
+---
+mystnb:
+  figure:
+    caption: "Government Expenditures and Tax Revenues in Britain"
+    name: fr_fig2
+---
+# Read the data from Excel file
+data2 = pd.read_excel(dette_url, sheet_name='Militspe', usecols='M:X',
+                      skiprows=7, nrows=102, header=None)
+
+# Plot the data
+plt.figure()
+plt.plot(range(1689, 1791), data2.iloc[:, 5], linewidth=0.8)
+plt.plot(range(1689, 1791), data2.iloc[:, 11], linewidth=0.8, color='red')
+plt.plot(range(1689, 1791), data2.iloc[:, 9], linewidth=0.8, color='orange')
+plt.plot(range(1689, 1791), data2.iloc[:, 8], 'o-',
+         markerfacecolor='none', linewidth=0.8, color='purple')
+
+# Customize the plot
+plt.gca().spines['top'].set_visible(False)
+plt.gca().spines['right'].set_visible(False)
+plt.gca().tick_params(labelsize=12)
+plt.xlim([1689, 1790])
+plt.ylabel('millions of pounds', fontsize=12)
+
+# Add text annotations
+plt.text(1765, 1.5, 'civil', fontsize=10)
+plt.text(1760, 4.2, 'civil plus debt service', fontsize=10)
+plt.text(1708, 15.5, 'total govt spending', fontsize=10)
+plt.text(1759, 7.3, 'revenues', fontsize=10)
+
+plt.tight_layout()
+plt.show()
+```
+
+Figures {numref}`fr_fig2` and {numref}`fr_fig3` summarize British and French government fiscal policies during the century before the start of the French Revolution in 1789.
+
+Before 1789, progressive forces in France admired how Britain had financed its government expenditures and wanted to redesign French fiscal arrangements to make them more like Britain's.
+
+Figure {numref}`fr_fig2` shows government expenditures and how they were distributed among
+
+ * civil (non-military) activities
+ * debt service, i.e., interest payments
+ * military expenditures (the yellow line minus the red line)
+
+Figure {numref}`fr_fig2` also plots total government revenues from tax collections (the purple circled line).
+
+Notice the surges in total government expenditures associated with surges in military expenditures in these four wars:
+
+ * Wars against France's King Louis XIV early in the 18th century
+ * The War of the Austrian Succession in the 1740s
+ * The French and Indian War in the 1750s and 1760s
+ * The American War for Independence from 1775 to 1783
+
+Figure {numref}`fr_fig2` indicates that
+
+ * during times of peace, government expenditures approximately equal tax revenues, and debt service payments neither grow nor decline over time
+ * during times of war, government expenditures exceed tax revenues
+ * the government finances the excess of expenditures over revenues by issuing debt
+ * after a war is over, the government's tax revenues exceed its non-interest expenditures by just enough to service the debt that the government issued to finance earlier deficits
+ * thus, after a war, the government does *not* raise taxes by enough to pay off its debt
+ * instead, it just rolls over whatever debt it inherits, raising taxes by just enough to service the interest payments on that debt
+
+Eighteenth-century British fiscal policy portrayed in Figure {numref}`fr_fig2` thus looks very much like a textbook example of a *tax-smoothing* model like Robert Barro's {cite}`Barro1979`; a small numerical sketch of this logic appears below.
+
+A striking feature of the graph is what we'll label a *law of gravity* between tax collections and government expenditures.
+
+ * levels of government expenditures and taxes attract each other
+ * while they can temporarily differ -- as they do during wars -- they come back together when peace returns
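+Here is the promised sketch of that tax-smoothing logic, with purely illustrative numbers (the interest rate, the size and length of the war, and the peacetime budget are assumptions, not estimates). A war is financed entirely by issuing debt; afterwards taxes rise permanently by just enough to service the extra debt, which is rolled over forever.
+
+```{code-cell} ipython3
+# Stylized tax-smoothing arithmetic (made-up numbers)
+r = 0.05          # assumed interest rate
+G_peace = 1.0     # peacetime non-interest spending
+G_war = 1.6       # wartime non-interest spending
+war_years = 5
+
+# During the war, taxes stay at their peacetime level and the
+# extra spending is financed by issuing debt.
+T_peace = G_peace
+war_debt = 0.0
+for t in range(war_years):
+    war_debt = (1 + r) * war_debt + (G_war - T_peace)
+
+# After the war, taxes rise permanently by just enough to pay
+# interest on the accumulated war debt, which is rolled over.
+T_postwar = G_peace + r * war_debt
+
+print(f"debt accumulated during the war: {war_debt:.2f}")
+print(f"taxes before the war:            {T_peace:.2f}")
+print(f"taxes needed after the war:      {T_postwar:.2f}")
+```
+
+Taxes rise after the war only by the interest on the war debt, not by enough to pay the debt off -- the pattern described in the list above.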
Next we'll plot data on debt service costs as fractions of government revenues in Great Britain and France during the 18th century.

```{code-cell} ipython3
---
mystnb:
  figure:
    caption: "Ratio of debt service to taxes, Britain and France"
    name: fr_fig1
---

# Read the data from the Excel file
data1 = pd.read_excel(dette_url, sheet_name='Debt',
        usecols='R:S', skiprows=5, nrows=99, header=None)
data1a = pd.read_excel(dette_url, sheet_name='Debt',
         usecols='P', skiprows=89, nrows=15, header=None)

# Plot the data
plt.figure()
plt.plot(range(1690, 1789), 100 * data1.iloc[:, 1], linewidth=0.8)

date = np.arange(1690, 1789)
index = (date < 1774) & (data1.iloc[:, 0] > 0)
plt.plot(date[index], 100 * data1[index].iloc[:, 0],
         '*:', color='r', linewidth=0.8)

# Plot the additional data
plt.plot(range(1774, 1789), 100 * data1a, '*:', color='orange')

# Note about the data
# The French data before 1720 don't match up with the published version
# Set the plot properties
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.gca().set_facecolor('white')
plt.gca().set_xlim([1688, 1788])
plt.ylabel('% of Taxes')

plt.tight_layout()
plt.show()
```

Figure {numref}`fr_fig1` shows that interest payments on government debt (i.e., so-called ''debt service'') were high fractions of government tax revenues in both Great Britain and France.

{numref}`fr_fig2` showed us that in peace times Britain managed to balance its budget despite those large interest costs.

But as we'll see in our next graph, on the eve of the French Revolution in 1788, the fiscal *law of gravity* that worked so well in Britain was not working very well in France.

```{code-cell} ipython3
# Read the data from the Excel file
data1 = pd.read_excel(fig_3_url, sheet_name='Sheet1',
        usecols='C:F', skiprows=5, nrows=30, header=None)

data1.replace(0, np.nan, inplace=True)
```

```{code-cell} ipython3
---
mystnb:
  figure:
    caption: "Government Spending and Tax Revenues in France"
    name: fr_fig3
---
# Plot the data
plt.figure()

plt.plot(range(1759, 1789, 1), data1.iloc[:, 0], '-x', linewidth=0.8)
plt.plot(range(1759, 1789, 1), data1.iloc[:, 1], '--*', linewidth=0.8)
plt.plot(range(1759, 1789, 1), data1.iloc[:, 2],
         '-o', linewidth=0.8, markerfacecolor='none')
plt.plot(range(1759, 1789, 1), data1.iloc[:, 3], '-*', linewidth=0.8)

plt.text(1775, 610, 'total spending', fontsize=10)
plt.text(1773, 325, 'military', fontsize=10)
plt.text(1773, 220, 'civil plus debt service', fontsize=10)
plt.text(1773, 80, 'debt service', fontsize=10)
plt.text(1785, 500, 'revenues', fontsize=10)

plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.ylim([0, 700])
plt.ylabel('millions of livres')

plt.tight_layout()
plt.show()
```

{numref}`fr_fig3` shows that on the eve of the French Revolution in 1788, government expenditures exceeded tax revenues.

Growing government debt service (i.e., interest payments) contributed to this situation, especially during and after France's expenditures to help the Americans in their War of Independence from Great Britain.

This was partly a consequence of the unfolding of the debt dynamics that underlie the Unpleasant Arithmetic discussed in the quantecon lecture {doc}`unpleasant`.

{cite}`sargent_velde1995` describe how the Ancient Regime that had governed France until 1788 had stable institutional features that made it difficult for the government to balance its budget.
Powerful contending interests had prevented the government from closing the gap between its total expenditures and its tax revenues by either

 * raising taxes, or
 * lowering the government's non-debt-service (i.e., non-interest) expenditures, or
 * lowering debt service (i.e., interest) costs by rescheduling, i.e., defaulting on some debts

Precedents and prevailing French arrangements had empowered three constituencies to block adjustments to components of the government budget constraint that they cared especially about

* tax payers
* beneficiaries of government expenditures
* government creditors (i.e., owners of government bonds)

When the French government had confronted a similar situation around 1720 after King Louis XIV's wars had left it with a debt crisis, it had sacrificed the interests of government creditors by defaulting on enough of its debt to bring interest payments down far enough to balance the budget.

Somehow, in 1789, creditors of the French government were more powerful than they had been in 1720.

Therefore, King Louis XVI convened the Estates General to ask them to redesign the French constitution in a way that would lower government expenditures or increase taxes, thereby allowing him to balance the budget while also honoring his promises to creditors of the French government.

The King called the Estates General together in an effort to promote reforms that would bring sustained budget balance.

{cite}`sargent_velde1995` describe how the French Revolutionaries set out to accomplish that.

## Nationalization, Privatization, Debt Reduction

In 1789, the Revolutionaries quickly reorganized the Estates General into a National Assembly.

A first piece of business was to address the fiscal crisis, the situation that had motivated the King to convene the Estates General.

The Revolutionaries were not socialists or communists.

To the contrary, they respected private property and knew state-of-the-art economics.

They knew that to honor government debts, they would have to raise new revenues or reduce expenditures.

A coincidence was that the Catholic Church owned vast income-producing properties.

Indeed, estimates of the capitalized value of those income streams put the value of church lands at about the same amount as the entire French government debt.

This coincidence fostered a three-step plan for servicing the French government debt

 * nationalize the church lands -- i.e., sequester or confiscate them without paying for them
 * sell the church lands
 * use the proceeds from those sales to service or even retire French government debt

The monetary theory underlying this plan had been set out by Adam Smith in his analysis of what he called *real bills* in his 1776 book **The Wealth of Nations** {cite}`smith2010wealth`, which many of the revolutionaries had read.

Adam Smith defined a *real bill* as a paper money note that is backed by a claim on a real asset like productive capital or inventories.

The National Assembly put together an ingenious institutional arrangement to implement this plan.

In response to a motion by Catholic Bishop Talleyrand (an atheist), the National Assembly confiscated and nationalized Church lands.

The National Assembly intended to use earnings from Church lands to service its national debt.

To do this, it began to implement a ''privatization plan'' that would let it service its debt while not raising taxes.
Their plan involved issuing paper notes called ''assignats'' that entitled bearers to use them to purchase state lands.

These paper notes would be ''as good as silver coins'' in the sense that both were acceptable means of payment in exchange for those (formerly) church lands.

Finance Minister Necker and the Constituents of the National Assembly thus planned to solve the privatization problem *and* the debt problem simultaneously by creating a new currency.

They devised a scheme to raise revenues by auctioning the confiscated lands, thereby withdrawing paper notes issued on the security of the lands sold by the government.

This ''tax-backed money'' scheme propelled the National Assembly into the domain of then-modern monetary theories.

Records of debates show how members of the Assembly marshaled theory and evidence to assess the likely effects of their innovation.

 * Members of the National Assembly quoted David Hume and Adam Smith
 * They cited John Law's System of 1720 and the American experiences with paper money fifteen years earlier as examples of how paper money schemes can go awry
 * Knowing these pitfalls, they set out to avoid them

They succeeded for two or three years.

But after that, France entered a big war that disrupted the plan in ways that completely altered the character of France's paper money. {cite}`sargent_velde1995` describe what happened.

## Remaking the tax code and tax administration

In 1789 the French Revolutionaries formed a National Assembly and set out to remake French fiscal policy.

They wanted to honor government debts -- the interests of French government creditors were well represented in the National Assembly.

But they set out to remake the French tax code and the administrative machinery for collecting taxes.

 * they abolished many taxes
 * they abolished the Ancient Regime's scheme for *tax farming*
 * tax farming meant that the government had privatized tax collection by hiring private citizens -- so-called tax farmers -- to collect taxes, letting them retain a fraction of the collections as payment for their services
 * the great chemist Lavoisier was also a tax farmer, which was one of the reasons that the Committee for Public Safety sent him to the guillotine in 1794

As a consequence of these tax reforms, government tax revenues declined.

The next figure shows this.

```{code-cell} ipython3
---
mystnb:
  figure:
    caption: "Index of real per capita revenues, France"
    name: fr_fig5
---
# Read data from Excel file
data5 = pd.read_excel(dette_url, sheet_name='Debt', usecols='K',
        skiprows=41, nrows=120, header=None)

# Plot the data
plt.figure()
plt.plot(range(1726, 1846), data5.iloc[:, 0], linewidth=0.8)

plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.gca().set_facecolor('white')
plt.gca().tick_params(labelsize=12)
plt.xlim([1726, 1845])
plt.ylabel('1726 = 1', fontsize=12)

plt.tight_layout()
plt.show()
```

According to {numref}`fr_fig5`, tax revenues per capita did not rise to their pre-1789 levels until after 1815, when Napoleon Bonaparte was exiled to St Helena and King Louis XVIII was restored to the French Crown.

 * from 1799 to 1814, Napoleon Bonaparte had other sources of revenues -- booty and reparations from provinces and nations that he defeated in war

 * from 1789 to 1799, the French Revolutionaries turned to another source to raise resources to pay for government purchases of goods and services and to service French government debt.
And as the next figure shows, government expenditures exceeded tax revenues by substantial amounts during the period from 1789 to 1799.

```{code-cell} ipython3
---
mystnb:
  figure:
    caption: "Spending (blue) and Revenues (orange), real values"
    name: fr_fig11
---
# Read data from Excel file
data11 = pd.read_excel(assignat_url, sheet_name='Budgets',
          usecols='J:K', skiprows=22, nrows=52, header=None)

# Prepare the x-axis data
x_data = np.concatenate([
    np.arange(1791, 1794 + 8/12, 1/12),
    np.arange(1794 + 9/12, 1795 + 3/12, 1/12)
])

# Remove NaN values from the data
data11_clean = data11.dropna()

# Plot the data
plt.figure()
h = plt.plot(x_data, data11_clean.values[:, 0], linewidth=0.8)
h = plt.plot(x_data, data11_clean.values[:, 1], '--', linewidth=0.8)

# Set plot properties
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.gca().set_facecolor('white')
plt.gca().tick_params(axis='both', which='major', labelsize=12)
plt.xlim([1791, 1795 + 3/12])
plt.xticks(np.arange(1791, 1796))
plt.yticks(np.arange(0, 201, 20))

# Set the y-axis label
plt.ylabel('millions of livres', fontsize=12)

plt.tight_layout()
plt.show()
```

To cover the discrepancies between government expenditures and tax revenues revealed in {numref}`fr_fig11`, the French revolutionaries printed paper money and spent it.

The next figure shows that by printing money, they were able to finance substantial purchases of goods and services, including military goods and soldiers' pay.

```{code-cell} ipython3
---
mystnb:
  figure:
    caption: "Revenues raised by printing paper money notes"
    name: fr_fig24
---
# Read data from Excel file
data12 = pd.read_excel(assignat_url, sheet_name='seignor',
         usecols='F', skiprows=6, nrows=75, header=None).squeeze()

# Create a figure and plot the data
plt.figure()
plt.plot(pd.date_range(start='1790', periods=len(data12), freq='M'),
         data12, linewidth=0.8)

plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)

plt.axhline(y=472.42/12, color='r', linestyle=':')
plt.xticks(ticks=pd.date_range(start='1790',
           end='1796', freq='AS'), labels=range(1790, 1797))
plt.xlim(pd.Timestamp('1791'),
         pd.Timestamp('1796-02') + pd.DateOffset(months=2))
plt.ylabel('millions of livres', fontsize=12)
plt.text(pd.Timestamp('1793-11'), 39.5, 'revenues in 1788',
         verticalalignment='top', fontsize=12)

plt.tight_layout()
plt.show()
```

{numref}`fr_fig24` compares the revenues raised by printing money from 1789 to 1796 with tax revenues that the Ancient Regime had raised in 1788.

Measured in goods, revenues raised at time $t$ by printing new money equal

$$
\frac{M_{t+1} - M_t}{p_t}
$$

where

* $M_t$ is the stock of paper money at time $t$ measured in livres
* $p_t$ is the price level at time $t$ measured in units of goods per livre at time $t$
* $M_{t+1} - M_t$ is the amount of new money printed at time $t$

Notice the 1793-1794 surge in revenues raised by printing money.

* This reflects extraordinary measures that the Committee for Public Safety adopted to force citizens to accept paper money, or else.

Also note the abrupt fall-off in revenues raised by 1797 and the absence of further observations after 1797.

* This reflects the end of using the printing press to raise revenues.
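As a small worked example of the seigniorage formula above, the sketch below computes real revenues from money creation for a made-up path of the money stock and the price level; the numbers are purely illustrative and are not the assignat data. (In {numref}`fr_fig24`, by contrast, the revenue series is read directly from a spreadsheet column rather than computed from separate series for $M_t$ and $p_t$.)

```{code-cell} ipython3
import numpy as np

# A worked illustration of the seigniorage formula (M_{t+1} - M_t) / p_t.
# The money stock M and price level p below are invented numbers, used only
# to show the computation; they are not the assignat data.
M = np.array([1.0, 1.5, 2.5, 4.5, 8.5])   # money stock at t = 0, ..., 4 (livres)
p = np.array([1.0, 1.2, 1.8, 3.0, 5.5])   # price level at t = 0, ..., 4 (livres per good)

seigniorage = (M[1:] - M[:-1]) / p[:-1]   # real revenue raised at t = 0, ..., 3
print(seigniorage)
```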
What French paper money entitled its holders to changed over time in interesting ways.

These changes led to outcomes that varied over time and that illustrate how the theories that guided the Revolutionaries' monetary policy decisions played out in practice.

The next figure shows the price level in France during the time that the Revolutionaries used paper money to finance parts of their expenditures.

Note that we use a log scale because the price level rose so much.

```{code-cell} ipython3
---
mystnb:
  figure:
    caption: "Price Level and Price of Gold (log scale)"
    name: fr_fig9
---
# Read the data from Excel file
data7 = pd.read_excel(assignat_url, sheet_name='Data',
        usecols='P:Q', skiprows=4, nrows=80, header=None)
data7a = pd.read_excel(assignat_url, sheet_name='Data',
         usecols='L', skiprows=4, nrows=80, header=None)
# Create the figure and plot
plt.figure()
x = np.arange(1789 + 10/12, 1796 + 5/12, 1/12)
h, = plt.plot(x, 1. / data7.iloc[:, 0], linestyle='--')
h, = plt.plot(x, 1. / data7.iloc[:, 1], color='r')

# Set properties of the plot
plt.gca().tick_params(labelsize=12)
plt.yscale('log')
plt.xlim([1789 + 10/12, 1796 + 5/12])
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)

# Add vertical lines
plt.axvline(x=1793 + 6.5/12, linestyle='-', linewidth=0.8, color='orange')
plt.axvline(x=1794 + 6.5/12, linestyle='-', linewidth=0.8, color='purple')

# Add text
plt.text(1793.75, 120, 'Terror', fontsize=12)
plt.text(1795, 2.8, 'price level', fontsize=12)
plt.text(1794.9, 40, 'gold', fontsize=12)


plt.tight_layout()
plt.show()
```

We have partitioned {numref}`fr_fig9`, which shows the log of the price level, and {numref}`fr_fig8` below, which plots real balances $\frac{M_t}{p_t}$, into three periods that correspond to different monetary experiments or *regimes*.

The first period ends in the late summer of 1793, and is characterized by growing real balances and moderate inflation.

The second period begins and ends with the Terror. It is marked by high real balances, around 2,500 million, and roughly stable prices. The fall of Robespierre in late July 1794 begins the third of our episodes, in which real balances decline and prices rise rapidly.

We interpret these three episodes in terms of three distinct theories

* a *backing* or *real bills* theory (the classic text for this theory is Adam Smith {cite}`smith2010wealth`)
* a legal restrictions theory ({cite}`keynes1940pay`, {cite}`bryant1984price`)
* a classical hyperinflation theory ({cite}`Cagan`)

```{note}
According to the empirical definition of hyperinflation adopted by {cite}`Cagan`,
beginning in the month that inflation exceeds 50 percent
per month and ending in the month before inflation drops below 50 percent per month
for at least a year, the *assignat* experienced a hyperinflation from May to December
1795.
```

We view these theories not as competitors but as alternative collections of ''if-then'' statements about government note issues, each of which finds its conditions more nearly met in one of these episodes than in the other two.
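To make the dating convention in the note above concrete, here is a sketch of Cagan's rule applied to a hypothetical monthly inflation series; the function name and the series are ours, invented only to illustrate the definition.

```{code-cell} ipython3
import numpy as np

def cagan_episode(pi, threshold=0.5, window=12):
    """
    Date a hyperinflation episode using Cagan's convention: start in the first
    month that monthly inflation exceeds `threshold`, end in the month before
    inflation drops below `threshold` and stays there for at least `window` months.
    Returns (start, end) indices, or None if inflation never exceeds the threshold.
    """
    above = pi > threshold
    if not above.any():
        return None
    start = int(np.argmax(above))
    for m in range(start + 1, len(pi) - window + 1):
        if np.all(pi[m:m + window] < threshold):
            return start, m - 1
    return start, len(pi) - 1   # episode still under way at the end of the sample

# A hypothetical monthly inflation series (0.5 = 50 percent per month),
# invented solely to illustrate the rule -- it is not the assignat data.
pi = np.array([0.1, 0.2, 0.6, 0.9, 1.2, 0.8, 0.3] + [0.1] * 12)
print(cagan_episode(pi))        # (2, 5): months 2 through 5 form the episode
```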
+ +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: "Real balances of assignats (in gold and goods)" + name: fr_fig8 +--- +# Read the data from Excel file +data7 = pd.read_excel(assignat_url, sheet_name='Data', + usecols='P:Q', skiprows=4, nrows=80, header=None) +data7a = pd.read_excel(assignat_url, sheet_name='Data', + usecols='L', skiprows=4, nrows=80, header=None) + +# Create the figure and plot +plt.figure() +h = plt.plot(pd.date_range(start='1789-11-01', periods=len(data7), freq='M'), + (data7a.values * [1, 1]) * data7.values, linewidth=1.) +plt.setp(h[1], linestyle='--', color='red') + +plt.vlines([pd.Timestamp('1793-07-15'), pd.Timestamp('1793-07-15')], + 0, 3000, linewidth=0.8, color='orange') +plt.vlines([pd.Timestamp('1794-07-15'), pd.Timestamp('1794-07-15')], + 0, 3000, linewidth=0.8, color='purple') + +plt.ylim([0, 3000]) + +# Set properties of the plot +plt.gca().spines['top'].set_visible(False) +plt.gca().spines['right'].set_visible(False) +plt.gca().set_facecolor('white') +plt.gca().tick_params(labelsize=12) +plt.xlim(pd.Timestamp('1789-11-01'), pd.Timestamp('1796-06-01')) +plt.ylabel('millions of livres', fontsize=12) + +# Add text annotations +plt.text(pd.Timestamp('1793-09-01'), 200, 'Terror', fontsize=12) +plt.text(pd.Timestamp('1791-05-01'), 750, 'gold value', fontsize=12) +plt.text(pd.Timestamp('1794-10-01'), 2500, 'real value', fontsize=12) + + +plt.tight_layout() +plt.show() +``` + +The three clouds of points in Figure +{numref}`fr_fig104` + depict different real balance-inflation relationships. + +Only the cloud for the +third period has the inverse relationship familiar to us now from twentieth-century +hyperinflations. + + + + +* subperiod 1: ("*real bills* period): January 1791 to July 1793 + +* subperiod 2: ("terror"): August 1793 - July 1794 + +* subperiod 3: ("classic Cagan hyperinflation"): August 1794 - March 1796 + +```{code-cell} ipython3 +def fit(x, y): + + b = np.cov(x, y)[0, 1] / np.var(x) + a = y.mean() - b * x.mean() + + return a, b +``` + +```{code-cell} ipython3 +# Load data +caron = np.load('datasets/caron.npy') +nom_balances = np.load('datasets/nom_balances.npy') + +infl = np.concatenate(([np.nan], + -np.log(caron[1:63, 1] / caron[0:62, 1]))) +bal = nom_balances[14:77, 1] * caron[:, 1] / 1000 +``` + +```{code-cell} ipython3 +# Regress y on x for three periods +a1, b1 = fit(bal[1:31], infl[1:31]) +a2, b2 = fit(bal[31:44], infl[31:44]) +a3, b3 = fit(bal[44:63], infl[44:63]) + +# Regress x on y for three periods +a1_rev, b1_rev = fit(infl[1:31], bal[1:31]) +a2_rev, b2_rev = fit(infl[31:44], bal[31:44]) +a3_rev, b3_rev = fit(infl[44:63], bal[44:63]) +``` + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: "Inflation and Real Balances" + name: fr_fig104 +--- +plt.figure() +plt.gca().spines['top'].set_visible(False) +plt.gca().spines['right'].set_visible(False) + +# First subsample +plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', + color='blue', label='real bills period') + +# Second subsample +plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') + +# Third subsample +plt.plot(bal[44:63], infl[44:63], '*', + color='orange', label='classic Cagan hyperinflation') + +plt.xlabel('real balances') +plt.ylabel('inflation') +plt.legend() + +plt.tight_layout() +plt.show() +``` + +The three clouds of points in {numref}`fr_fig104` evidently + depict different real balance-inflation relationships. 
+ +Only the cloud for the +third period has the inverse relationship familiar to us now from twentieth-century +hyperinflations. + + To bring this out, we'll use linear regressions to draw straight lines that compress the + inflation-real balance relationship for our three sub-periods. + + Before we do that, we'll drop some of the early observations during the terror period + to obtain the following graph. + +```{code-cell} ipython3 +# Regress y on x for three periods +a1, b1 = fit(bal[1:31], infl[1:31]) +a2, b2 = fit(bal[31:44], infl[31:44]) +a3, b3 = fit(bal[44:63], infl[44:63]) + +# Regress x on y for three periods +a1_rev, b1_rev = fit(infl[1:31], bal[1:31]) +a2_rev, b2_rev = fit(infl[31:44], bal[31:44]) +a3_rev, b3_rev = fit(infl[44:63], bal[44:63]) +``` + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: "Inflation and Real Balances" + name: fr_fig104b +--- +plt.figure() +plt.gca().spines['top'].set_visible(False) +plt.gca().spines['right'].set_visible(False) + +# First subsample +plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') + +# Second subsample +plt.plot(bal[34:44], infl[34:44], '+', color='red', label='terror') + +# Third subsample +plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') + +plt.xlabel('real balances') +plt.ylabel('inflation') +plt.legend() + +plt.tight_layout() +plt.show() +``` + +Now let's regress inflation on real balances during the *real bills* period and plot the regression +line. + +```{code-cell} ipython3 +--- +mystnb: + figure: + caption: "Inflation and Real Balances" + name: fr_fig104c +--- +plt.figure() +plt.gca().spines['top'].set_visible(False) +plt.gca().spines['right'].set_visible(False) + +# First subsample +plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', + color='blue', label='real bills period') +plt.plot(bal[1:31], a1 + bal[1:31] * b1, color='blue') + +# Second subsample +plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') + +# Third subsample +plt.plot(bal[44:63], infl[44:63], '*', + color='orange', label='classic Cagan hyperinflation') + +plt.xlabel('real balances') +plt.ylabel('inflation') +plt.legend() + +plt.tight_layout() +plt.show() +``` + +The regression line in {numref}`fr_fig104c` shows that large increases in real balances of +assignats (paper money) were accompanied by only modest rises in the price level, an outcome in line +with the *real bills* theory. + +During this period, assignats were claims on church lands. + +But towards the end of this period, the price level started to rise and real balances to fall +as the government continued to print money but stopped selling church land. + +To get people to hold that paper money, the government forced people to hold it by using legal restrictions. + +Now let's regress real balances on inflation during the terror and plot the regression +line. 
```{code-cell} ipython3
---
mystnb:
  figure:
    caption: "Inflation and Real Balances"
    name: fr_fig104d
---
plt.figure()
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)

# First subsample
plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none',
         color='blue', label='real bills period')

# Second subsample
plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror')
plt.plot(a2_rev + b2_rev * infl[31:44], infl[31:44], color='red')

# Third subsample
plt.plot(bal[44:63], infl[44:63], '*',
         color='orange', label='classic Cagan hyperinflation')

plt.xlabel('real balances')
plt.ylabel('inflation')
plt.legend()

plt.tight_layout()
plt.show()
```

The regression line in {numref}`fr_fig104d` shows that large increases in real balances of assignats (paper money) were accompanied by little upward price level pressure, even some declines in prices.

This reflects how well legal restrictions -- financial repression -- were working during the period of the Terror.

But the Terror ended in July 1794. That unleashed a big inflation as people tried to find other ways to transact and store value.

The following two graphs are for the classical hyperinflation period.

One regresses inflation on real balances; the other regresses real balances on inflation.

Both show a pronounced inverse relationship that is the hallmark of the hyperinflations studied by Cagan {cite}`Cagan`.

```{code-cell} ipython3
---
mystnb:
  figure:
    caption: "Inflation and Real Balances"
    name: fr_fig104e
---
plt.figure()
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)

# First subsample
plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none',
         color='blue', label='real bills period')

# Second subsample
plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror')

# Third subsample
plt.plot(bal[44:63], infl[44:63], '*',
         color='orange', label='classic Cagan hyperinflation')
plt.plot(bal[44:63], a3 + bal[44:63] * b3, color='orange')

plt.xlabel('real balances')
plt.ylabel('inflation')
plt.legend()

plt.tight_layout()
plt.show()
```

{numref}`fr_fig104e` shows the results of regressing inflation on real balances during the period of the hyperinflation.

```{code-cell} ipython3
---
mystnb:
  figure:
    caption: "Inflation and Real Balances"
    name: fr_fig104f
---
plt.figure()
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)

# First subsample
plt.plot(bal[1:31], infl[1:31], 'o',
         markerfacecolor='none', color='blue', label='real bills period')

# Second subsample
plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror')

# Third subsample
plt.plot(bal[44:63], infl[44:63], '*',
         color='orange', label='classic Cagan hyperinflation')
plt.plot(a3_rev + b3_rev * infl[44:63], infl[44:63], color='orange')

plt.xlabel('real balances')
plt.ylabel('inflation')
plt.legend()

plt.tight_layout()
plt.show()
```

{numref}`fr_fig104f` shows the results of regressing real money balances on inflation during the period of the hyperinflation.
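As an aside, the slope produced by the `fit` helper defined earlier is not exactly the least-squares slope: `np.cov` divides by $n-1$ by default while `np.var` divides by $n$, so the ratio overstates the least-squares slope by a factor of $n/(n-1)$, a difference of a few percent with roughly 30 observations per subperiod that does not change the visual message of these figures. The short cross-check below, which reuses `fit`, `bal`, and `infl` from above, verifies this against `np.polyfit`.

```{code-cell} ipython3
import numpy as np

# Cross-check the covariance-ratio slope in `fit` against np.polyfit.
# np.cov divides by n-1 by default while np.var divides by n, so `fit`
# returns n/(n-1) times the least-squares slope -- a small difference here.
n = len(bal[1:31])
slope_ls, intercept_ls = np.polyfit(bal[1:31], infl[1:31], 1)
a1_chk, b1_chk = fit(bal[1:31], infl[1:31])
print(slope_ls, b1_chk * (n - 1) / n)   # the two slopes agree after rescaling
```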
## Hyperinflation Ends

{cite}`sargent_velde1995` tell how in 1797 the Revolutionary government abruptly ended the inflation by

 * repudiating 2/3 of the national debt, and thereby
 * eliminating the net-of-interest government deficit
 * no longer printing money, but instead
 * using gold and silver coins as money

In 1799, Napoleon Bonaparte became First Consul and for the next 15 years used resources confiscated from conquered territories to help pay for French government expenditures.

## Underlying Theories

This lecture sets the stage for studying theories of inflation and the government monetary and fiscal policies that bring it about.

A *monetarist theory of the price level* is described in the quantecon lecture {doc}`cagan_ree`.

That lecture sets the stage for the quantecon lectures {doc}`money_inflation` and {doc}`unpleasant`.

diff --git a/lectures/french_rev_tom.md b/lectures/french_rev_tom.md deleted file mode 100644 index e6c19e22..00000000 --- a/lectures/french_rev_tom.md +++ /dev/null @@ -1,1078 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.16.1 -kernelspec: - display_name: Python 3 (ipykernel) - language: python - name: python3 ---- - - -# Inflation During French Revolution - - -## Overview - -This lecture describes some monetary and fiscal features of the French Revolution -described by {cite}`sargent_velde1995`. - -In order to finance public expenditures and service debts issued by earlier French governments, -successive French governments performed several policy experiments. - -Authors of these experiments were guided by their having decided to put in place monetary-fiscal policies recommended by particular theories. - -As a consequence, data on money growth and inflation from the period 1789 to 1787 at least temorarily illustrated outcomes predicted by these arrangements: - -* some *unpleasant monetarist arithmetic* like that described in this quanteon lecture XXX -that governed French government debt dynamics in the decades preceding 1789 - -* a *real bills* theory of the effects of government open market operations in which the government *backs* its issues of paper money with valuable real property or financial assets - -* a classical ``gold or silver'' standard - -* a classical inflation-tax theory of inflation in which Philip Cagan's demand for money studied -in this lecture is a key component - -* a *legal restrictions* or *financial repression* theory of the demand for real balances - -We use matplotlib to replicate several of the graphs that they used to present salient patterns.
- - - -## Data Sources - -This notebook uses data from three spreadsheets: - - * datasets/fig_3.ods - * datasets/dette.xlsx - * datasets/assignat.xlsx - -```{code-cell} ipython3 -import numpy as np -import pandas as pd -import matplotlib.pyplot as plt -plt.rcParams.update({'font.size': 12}) -``` - - -## Figure 1 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Ratio of debt service to taxes, Britain and France" - name: fig1 ---- - -# Read the data from the Excel file -data1 = pd.read_excel('datasets/dette.xlsx', sheet_name='Debt', usecols='R:S', skiprows=5, nrows=99, header=None) -data1a = pd.read_excel('datasets/dette.xlsx', sheet_name='Debt', usecols='P', skiprows=89, nrows=15, header=None) - -# Plot the data -plt.figure() -plt.plot(range(1690, 1789), 100 * data1.iloc[:, 1], linewidth=0.8) - -date = np.arange(1690, 1789) -index = (date < 1774) & (data1.iloc[:, 0] > 0) -plt.plot(date[index], 100 * data1[index].iloc[:, 0], '*:', color='r', linewidth=0.8) - -# Plot the additional data -plt.plot(range(1774, 1789), 100 * data1a, '*:', color='orange') - -# Note about the data -# The French data before 1720 don't match up with the published version -# Set the plot properties -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_facecolor('white') -plt.gca().set_xlim([1688, 1788]) -plt.ylabel('% of Taxes') - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig1.pdf', dpi=600) -#plt.savefig('frfinfig1.jpg', dpi=600) -``` - - - {numref}`fig1` plots ratios of debt service to total taxes collected for Great Britain and France. - The figure shows - - * ratios of debt service to taxes rise for both countries at the beginning of the century and at the end of the century - * ratios that are similar for both countries in most years - - - - - -## Figure 2 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Government Expenditures and Tax Revenues in Britain" - name: fig2 ---- - -# Read the data from Excel file -data2 = pd.read_excel('datasets/dette.xlsx', sheet_name='Militspe', usecols='M:X', skiprows=7, nrows=102, header=None) - -# Plot the data -plt.figure() -plt.plot(range(1689, 1791), data2.iloc[:, 5], linewidth=0.8) -plt.plot(range(1689, 1791), data2.iloc[:, 11], linewidth=0.8, color='red') -plt.plot(range(1689, 1791), data2.iloc[:, 9], linewidth=0.8, color='orange') -plt.plot(range(1689, 1791), data2.iloc[:, 8], 'o-', markerfacecolor='none', linewidth=0.8, color='purple') - -# Customize the plot -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().tick_params(labelsize=12) -plt.xlim([1689, 1790]) -plt.ylabel('millions of pounds', fontsize=12) - -# Add text annotations -plt.text(1765, 1.5, 'civil', fontsize=10) -plt.text(1760, 4.2, 'civil plus debt service', fontsize=10) -plt.text(1708, 15.5, 'total govt spending', fontsize=10) -plt.text(1759, 7.3, 'revenues', fontsize=10) - - -plt.tight_layout() -plt.show() - -# Save the figure as a PDF -#plt.savefig('frfinfig2.pdf', dpi=600) -``` - - - -{numref}`fig2` plots total taxes, total government expenditures, and the composition of government expenditures in Great Britain during much of the 18th century. 
- -## Figure 3 - - - - -```{code-cell} ipython3 -# Read the data from the Excel file -data1 = pd.read_excel('datasets/fig_3.xlsx', sheet_name='Sheet1', usecols='C:F', skiprows=5, nrows=30, header=None) - -data1.replace(0, np.nan, inplace=True) -``` - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Government Spending and Tax Revenues in France" - name: fr_fig3 ---- -# Plot the data -plt.figure() - -plt.plot(range(1759, 1789, 1), data1.iloc[:, 0], '-x', linewidth=0.8) -plt.plot(range(1759, 1789, 1), data1.iloc[:, 1], '--*', linewidth=0.8) -plt.plot(range(1759, 1789, 1), data1.iloc[:, 2], '-o', linewidth=0.8, markerfacecolor='none') -plt.plot(range(1759, 1789, 1), data1.iloc[:, 3], '-*', linewidth=0.8) - -plt.text(1775, 610, 'total spending', fontsize=10) -plt.text(1773, 325, 'military', fontsize=10) -plt.text(1773, 220, 'civil plus debt service', fontsize=10) -plt.text(1773, 80, 'debt service', fontsize=10) -plt.text(1785, 500, 'revenues', fontsize=10) - - - -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.ylim([0, 700]) -plt.ylabel('millions of livres') - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig3.jpg', dpi=600) -``` - - -TO TEACH TOM: By staring at {numref}`fr_fig3` carefully - -{numref}`fr_fig3` plots total taxes, total government expenditures, and the composition of government expenditures in France during much of the 18th century. - -```{code-cell} ipython3 - ---- -mystnb: - figure: - caption: "Government Spending and Tax Revenues in France" - name: fr_fig3b ---- -# Plot the data -plt.figure() - -plt.plot(np.arange(1759, 1789, 1)[~np.isnan(data1.iloc[:, 0])], data1.iloc[:, 0][~np.isnan(data1.iloc[:, 0])], '-x', linewidth=0.8) -plt.plot(np.arange(1759, 1789, 1)[~np.isnan(data1.iloc[:, 1])], data1.iloc[:, 1][~np.isnan(data1.iloc[:, 1])], '--*', linewidth=0.8) -plt.plot(np.arange(1759, 1789, 1)[~np.isnan(data1.iloc[:, 2])], data1.iloc[:, 2][~np.isnan(data1.iloc[:, 2])], '-o', linewidth=0.8, markerfacecolor='none') -plt.plot(np.arange(1759, 1789, 1)[~np.isnan(data1.iloc[:, 3])], data1.iloc[:, 3][~np.isnan(data1.iloc[:, 3])], '-*', linewidth=0.8) - -plt.text(1775, 610, 'total spending', fontsize=10) -plt.text(1773, 325, 'military', fontsize=10) -plt.text(1773, 220, 'civil plus debt service', fontsize=10) -plt.text(1773, 80, 'debt service', fontsize=10) -plt.text(1785, 500, 'revenues', fontsize=10) - - -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.ylim([0, 700]) -plt.ylabel('millions of livres') - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig3_ignore_nan.jpg', dpi=600) -``` - -{numref}`fr_fig3b` plots total taxes, total government expenditures, and the composition of government expenditures in France during much of the 18th century. 
- - - - -## Figure 4 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Military Spending in Britain and France" - name: fig4 ---- -# French military spending, 1685-1789, in 1726 livres -data4 = pd.read_excel('datasets/dette.xlsx', sheet_name='Militspe', usecols='D', skiprows=3, nrows=105, header=None).squeeze() -years = range(1685, 1790) - -plt.figure() -plt.plot(years, data4, '*-', linewidth=0.8) - -plt.plot(range(1689, 1791), data2.iloc[:, 4], linewidth=0.8) - -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().tick_params(labelsize=12) -plt.xlim([1689, 1790]) -plt.xlabel('*: France') -plt.ylabel('Millions of livres') -plt.ylim([0, 475]) - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig4.pdf', dpi=600) -``` - - -{numref}`fig4` plots total taxes, total government expenditures, and the composition of government expenditures in France during much of the 18th century. - -TO TEACH TOM: By staring at {numref}`fig4` carefully - - -## Figure 5 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Index of real per capital revenues, France" - name: fig5 ---- -# Read data from Excel file -data5 = pd.read_excel('datasets/dette.xlsx', sheet_name='Debt', usecols='K', skiprows=41, nrows=120, header=None) - -# Plot the data -plt.figure() -plt.plot(range(1726, 1846), data5.iloc[:, 0], linewidth=0.8) - -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_facecolor('white') -plt.gca().tick_params(labelsize=12) -plt.xlim([1726, 1845]) -plt.ylabel('1726 = 1', fontsize=12) - -plt.tight_layout() -plt.show() - -# Save the figure as a PDF -#plt.savefig('frfinfig5.pdf', dpi=600) -``` - -TO TEACH TOM: By staring at {numref}`fig5` carefully - -## Rise and Fall of the *Assignat* - - - - We have partitioned Figures~\ref{fig:fig7}, \ref{fig:fig8}, and \ref{fig:fig9} - into three periods, corresponding -to different monetary regimes or episodes. The three clouds of points in -Figure~\ref{fig:fig7} - depict different real balance-inflation relationships. Only the cloud for the -third period has the inverse relationship familiar to us now from twentieth-century -hyperinflations. The first period ends in the late summer of 1793, and is characterized -by growing real balances and moderate inflation. The second period begins and ends -with the Terror. It is marked by high real balances, around 2,500 millions, and -roughly stable prices. The fall of Robespierre in late July 1794 begins the third -of our episodes, in which real balances decline and prices rise rapidly. We interpret -these three episodes in terms of three separate theories about money: a ``backing'' -or ''real bills'' theory (the text is Adam Smith (1776)), -a legal restrictions theory (TOM: HERE PLEASE CITE -Keynes,1940, AS WELL AS Bryant/Wallace:1984 and Villamil:1988) -and a classical hyperinflation theory.% -```{note} -According to the empirical definition of hyperinflation adopted by {cite}`Cagan`, -beginning in the month that inflation exceeds 50 percent -per month and ending in the month before inflation drops below 50 percent per month -for at least a year, the *assignat* experienced a hyperinflation from May to December -1795. -``` -We view these -theories not as competitors but as alternative collections of ``if-then'' -statements about government note issues, each of which finds its conditions more -nearly met in one of these episodes than in the other two. 
- - - - - -## Figure 7 - - -## To Do for Zejin - -I want to tweak and consolidate the extra lines that Zejin drew on the beautiful **Figure 7**. - -I'd like to experiment in plotting the **six** extra lines all on one graph -- a pair of lines for each of our subsamples - - * one for the $y$ on $x$ regression line - * another for the $x$ on $y$ regression line - -I'd like the $y$ on $x$ and $x$ on $y$ lines to be in separate colors. - -Once we are satisfied with this new graph with its six additional lines, we can dispense with the other graphs that add one line at a time. - -Zejin, I can explain on zoom the lessons I want to convey with this. - - - -Just to recall, to compute the regression lines, Zejin wrote a function that use standard formulas -for a and b in a least squares regression y = a + b x + residual -- i.e., b is ratio of sample covariance of y,x to sample variance of x; while a is then computed from a = sample mean of y - \hat b *sample mean of x - -We could presumably tell students how to do this with a couple of numpy lines -I'd like to create three additional versions of the following figure. - -To remind you, we focused on three subperiods: - - -* subperiod 1: ("real bills period): January 1791 to July 1793 - -* subperiod 2: ("terror:): August 1793 - July 1794 - -* subperiod 3: ("classic Cagan hyperinflation): August 1794 - March 1796 - - -I can explain what this is designed to show. - - - -```{code-cell} ipython3 -def fit(x, y): - - b = np.cov(x, y)[0, 1] / np.var(x) - a = y.mean() - b * x.mean() - - return a, b -``` - -```{code-cell} ipython3 -# load data -caron = np.load('datasets/caron.npy') -nom_balances = np.load('datasets/nom_balances.npy') - -infl = np.concatenate(([np.nan], -np.log(caron[1:63, 1] / caron[0:62, 1]))) -bal = nom_balances[14:77, 1] * caron[:, 1] / 1000 -``` - -```{code-cell} ipython3 -# fit data - -# reg y on x for three periods -a1, b1 = fit(bal[1:31], infl[1:31]) -a2, b2 = fit(bal[31:44], infl[31:44]) -a3, b3 = fit(bal[44:63], infl[44:63]) - -# reg x on y for three periods -a1_rev, b1_rev = fit(infl[1:31], bal[1:31]) -a2_rev, b2_rev = fit(infl[31:44], bal[31:44]) -a3_rev, b3_rev = fit(infl[44:63], bal[44:63]) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7.pdf', dpi=600) -``` - - - -```{code-cell} ipython3 -# fit data - -# reg y on x for three periods -a1, b1 = fit(bal[1:31], infl[1:31]) -a2, b2 = fit(bal[31:44], infl[31:44]) -a3, b3 = fit(bal[44:63], infl[44:63]) - -# reg x on y for three periods -a1_rev, b1_rev = fit(infl[1:31], bal[1:31]) -a2_rev, b2_rev = fit(infl[31:44], bal[31:44]) -a3_rev, b3_rev = fit(infl[44:63], bal[44:63]) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[34:44], infl[34:44], '+', color='red', label='terror') - -# third subsample # Tom 
tinkered with subsample period -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7.pdf', dpi=600) -``` - - -

The above graph is Tom's experimental lab. We'll delete it eventually.

- -

Zejin: below is the grapth with six lines in one graph. The lines generated by regressing y on x have the same color as the corresponding data points, while the lines generated by regressing x on y are all in green.

- -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') -plt.plot(bal[1:31], a1 + bal[1:31] * b1, color='blue', linewidth=0.8) -plt.plot(a1_rev + b1_rev * infl[1:31], infl[1:31], color='green', linewidth=0.8) - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') -plt.plot(bal[31:44], a2 + bal[31:44] * b2, color='red', linewidth=0.8) -plt.plot(a2_rev + b2_rev * infl[31:44], infl[31:44], color='green', linewidth=0.8) - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') -plt.plot(bal[44:63], a3 + bal[44:63] * b3, color='orange', linewidth=0.8) -plt.plot(a3_rev + b3_rev * infl[44:63], infl[44:63], color='green', linewidth=0.8) - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() -#plt.savefig('frfinfig7.pdf', dpi=600) -``` - - - -

The graph below is Tom's version of the six lines in one graph. The lines generated by regressing y on x have the same color as the corresponding data points, while the lines generated by regressing x on y are all in green.

- -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') -plt.plot(bal[1:31], a1 + bal[1:31] * b1, color='blue', linewidth=0.8) -plt.plot(a1_rev + b1_rev * infl[1:31], infl[1:31], color='green', linewidth=0.8) - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') -plt.plot(bal[34:44], a2 + bal[34:44] * b2, color='red', linewidth=0.8) -plt.plot(a2_rev + b2_rev * infl[34:44], infl[34:44], color='green', linewidth=0.8) - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') -plt.plot(bal[44:63], a3 + bal[44:63] * b3, color='orange', linewidth=0.8) -plt.plot(a3_rev + b3_rev * infl[44:63], infl[44:63], color='green', linewidth=0.8) - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') -plt.plot(bal[1:31], a1 + bal[1:31] * b1, color='blue') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line1.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') -plt.plot(a1_rev + b1_rev * infl[1:31], infl[1:31], color='blue') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line1_rev.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') -plt.plot(bal[31:44], a2 + bal[31:44] * b2, color='red') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line2.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') -plt.plot(a2_rev + b2_rev * infl[31:44], infl[31:44], color='red') - -# third subsample 
-plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line2_rev.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') -plt.plot(bal[44:63], a3 + bal[44:63] * b3, color='orange') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line3.pdf', dpi=600) -``` - -```{code-cell} ipython3 -plt.figure() -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# first subsample -plt.plot(bal[1:31], infl[1:31], 'o', markerfacecolor='none', color='blue', label='real bills period') - -# second subsample -plt.plot(bal[31:44], infl[31:44], '+', color='red', label='terror') - -# third subsample -plt.plot(bal[44:63], infl[44:63], '*', color='orange', label='classic Cagan hyperinflation') -plt.plot(a3_rev + b3_rev * infl[44:63], infl[44:63], color='orange') - -plt.xlabel('real balances') -plt.ylabel('inflation') -plt.legend() - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig7_line3_rev.pdf', dpi=600) -``` - - -## Figure 8 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Real balances of assignats (in gold and goods)" - name: fig8 ---- -# Read the data from Excel file -data7 = pd.read_excel('datasets/assignat.xlsx', sheet_name='Data', usecols='P:Q', skiprows=4, nrows=80, header=None) -data7a = pd.read_excel('datasets/assignat.xlsx', sheet_name='Data', usecols='L', skiprows=4, nrows=80, header=None) - -# Create the figure and plot -plt.figure() -h = plt.plot(pd.date_range(start='1789-11-01', periods=len(data7), freq='M'), (data7a.values * [1, 1]) * data7.values, linewidth=1.) -plt.setp(h[1], linestyle='--', color='red') - -plt.vlines([pd.Timestamp('1793-07-15'), pd.Timestamp('1793-07-15')], 0, 3000, linewidth=0.8, color='orange') -plt.vlines([pd.Timestamp('1794-07-15'), pd.Timestamp('1794-07-15')], 0, 3000, linewidth=0.8, color='purple') - -plt.ylim([0, 3000]) - -# Set properties of the plot -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_facecolor('white') -plt.gca().tick_params(labelsize=12) -plt.xlim(pd.Timestamp('1789-11-01'), pd.Timestamp('1796-06-01')) -plt.ylabel('millions of livres', fontsize=12) - -# Add text annotations -plt.text(pd.Timestamp('1793-09-01'), 200, 'Terror', fontsize=12) -plt.text(pd.Timestamp('1791-05-01'), 750, 'gold value', fontsize=12) -plt.text(pd.Timestamp('1794-10-01'), 2500, 'real value', fontsize=12) - - -plt.tight_layout() -plt.show() - -# Save the figure as a PDF -#plt.savefig('frfinfig8.pdf', dpi=600) -``` - -TO TEACH TOM: By staring at {numref}`fig8` carefully - - -## Figure 9 - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Price Level and Price of Gold (log scale)" - name: fig9 ---- -# Create the figure and plot -plt.figure() -x = np.arange(1789 + 10/12, 1796 + 5/12, 1/12) -h, = plt.plot(x, 1. / data7.iloc[:, 0], linestyle='--') -h, = plt.plot(x, 1. 
/ data7.iloc[:, 1], color='r') - -# Set properties of the plot -plt.gca().tick_params(labelsize=12) -plt.yscale('log') -plt.xlim([1789 + 10/12, 1796 + 5/12]) -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -# Add vertical lines -plt.axvline(x=1793 + 6.5/12, linestyle='-', linewidth=0.8, color='orange') -plt.axvline(x=1794 + 6.5/12, linestyle='-', linewidth=0.8, color='purple') - -# Add text -plt.text(1793.75, 120, 'Terror', fontsize=12) -plt.text(1795, 2.8, 'price level', fontsize=12) -plt.text(1794.9, 40, 'gold', fontsize=12) - - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig9.pdf', dpi=600) -``` - -TO TEACH TOM: By staring at {numref}`fig9` carefully - - -## Figure 11 - - - - -```{code-cell} ipython3 ---- -mystnb: - figure: - caption: "Spending (blue) and Revenues (orange), (real values)" - name: fig11 ---- -# Read data from Excel file -data11 = pd.read_excel('datasets/assignat.xlsx', sheet_name='Budgets', usecols='J:K', skiprows=22, nrows=52, header=None) - -# Prepare the x-axis data -x_data = np.concatenate([ - np.arange(1791, 1794 + 8/12, 1/12), - np.arange(1794 + 9/12, 1795 + 3/12, 1/12) -]) - -# Remove NaN values from the data -data11_clean = data11.dropna() - -# Plot the data -plt.figure() -h = plt.plot(x_data, data11_clean.values[:, 0], linewidth=0.8) -h = plt.plot(x_data, data11_clean.values[:, 1], '--', linewidth=0.8) - - - -# Set plot properties -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_facecolor('white') -plt.gca().tick_params(axis='both', which='major', labelsize=12) -plt.xlim([1791, 1795 + 3/12]) -plt.xticks(np.arange(1791, 1796)) -plt.yticks(np.arange(0, 201, 20)) - -# Set the y-axis label -plt.ylabel('millions of livres', fontsize=12) - - - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig11.pdf', dpi=600) -``` -TO TEACH TOM: By staring at {numref}`fig11` carefully - - -## Figure 12 - - -```{code-cell} ipython3 -# Read data from Excel file -data12 = pd.read_excel('datasets/assignat.xlsx', sheet_name='seignor', usecols='F', skiprows=6, nrows=75, header=None).squeeze() - - -# Create a figure and plot the data -plt.figure() -plt.plot(pd.date_range(start='1790', periods=len(data12), freq='M'), data12, linewidth=0.8) - -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) - -plt.axhline(y=472.42/12, color='r', linestyle=':') -plt.xticks(ticks=pd.date_range(start='1790', end='1796', freq='AS'), labels=range(1790, 1797)) -plt.xlim(pd.Timestamp('1791'), pd.Timestamp('1796-02') + pd.DateOffset(months=2)) -plt.ylabel('millions of livres', fontsize=12) -plt.text(pd.Timestamp('1793-11'), 39.5, 'revenues in 1788', verticalalignment='top', fontsize=12) - - -plt.tight_layout() -plt.show() - -#plt.savefig('frfinfig12.pdf', dpi=600) -``` - - -## Figure 13 - - -```{code-cell} ipython3 -# Read data from Excel file -data13 = pd.read_excel('datasets/assignat.xlsx', sheet_name='Exchge', usecols='P:T', skiprows=3, nrows=502, header=None) - -# Plot the last column of the data -plt.figure() -plt.plot(data13.iloc[:, -1], linewidth=0.8) - -# Set properties of the plot -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_xlim([1, len(data13)]) - -# Set x-ticks and x-tick labels -ttt = np.arange(1, len(data13) + 1) -plt.xticks(ttt[~np.isnan(data13.iloc[:, 0])], - ['Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'Jan', 'Feb', - 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 
'Sep']) - -# Add text to the plot -plt.text(1, 120, '1795', fontsize=12, ha='center') -plt.text(262, 120, '1796', fontsize=12, ha='center') - -# Draw a horizontal line and add text -plt.axhline(y=186.7, color='red', linestyle='-', linewidth=0.8) -plt.text(150, 190, 'silver parity', fontsize=12) - -# Add an annotation with an arrow -plt.annotate('end of the assignat', xy=(340, 172), xytext=(380, 160), - arrowprops=dict(facecolor='black', arrowstyle='->'), fontsize=12) - - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig13.pdf', dpi=600) -``` - - -## Figure 14 - - -```{code-cell} ipython3 -# figure 14 -data14 = pd.read_excel('datasets/assignat.xlsx', sheet_name='Post-95', usecols='I', skiprows=9, nrows=91, header=None).squeeze() -data14a = pd.read_excel('datasets/assignat.xlsx', sheet_name='Post-95', usecols='F', skiprows=100, nrows=151, header=None).squeeze() - -plt.figure() -h = plt.plot(data14, '*-', markersize=2, linewidth=0.8) -plt.plot(np.concatenate([np.full(data14.shape, np.nan), data14a]), linewidth=0.8) -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.gca().set_xticks(range(20, 237, 36)) -plt.gca().set_xticklabels(range(1796, 1803)) -plt.xlabel('*: Before the 2/3 bankruptcy') -plt.ylabel('Francs') - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig14.pdf', dpi=600) -``` - - -## Figure 15 - - -```{code-cell} ipython3 -# figure 15 -data15 = pd.read_excel('datasets/assignat.xlsx', sheet_name='Post-95', usecols='N', skiprows=4, nrows=88, header=None).squeeze() - -plt.figure() -h = plt.plot(range(2, 90), data15, '*-', linewidth=0.8) -plt.setp(h, markersize=2) -plt.gca().spines['top'].set_visible(False) -plt.gca().spines['right'].set_visible(False) -plt.text(47.5, 11.4, '17 brumaire', horizontalalignment='left', fontsize=12) -plt.text(49.5, 14.75, '19 brumaire', horizontalalignment='left', fontsize=12) -plt.text(15, -1, 'Vendémiaire 8', fontsize=12, horizontalalignment='center') -plt.text(45, -1, 'Brumaire', fontsize=12, horizontalalignment='center') -plt.text(75, -1, 'Frimaire', fontsize=12, horizontalalignment='center') -plt.ylim([0, 25]) -plt.xticks([], []) -plt.ylabel('Francs') - -plt.tight_layout() -plt.show() -#plt.savefig('frfinfig15.pdf', dpi=600) -``` - -```{code-cell} ipython3 - -``` - - -## Fiscal Situation and Response of National Assembly - - -In response to a motion by Catholic Bishop Talleyrand, -the National Assembly confiscated and nationalized Church lands. - -But the National Assembly was dominated by free market advocates, not socialists. - -The National Assembly intended to use earnings from Church lands to service its national debt. - -To do this, it began to implement a ''privatization plan'' that would let it service its debt while -not raising taxes. - -Their plan involved issuing paper notes called ''assignats'' that entitled bearers to use them to purchase state lands. - -These paper notes would be ''as good as silver coins'' in the sense that both were acceptable means of payment in exchange for those (formerly) church lands. - -Finance Minister Necker and the Constituants planned -to solve the privatization problem **and** the debt problem simultaneously -by creating a new currency. - -They devised a scheme to raise revenues by auctioning -the confiscated lands, thereby withdrawing paper notes issued on the security of -the lands sold by the government. - - This ''tax-backed money'' scheme propelled the National Assembly into the domain of monetary experimentation. 
- -Records of their debates show -how members of the Assembly marshaled theory and evidence to assess the likely -effects of their innovation. - -They quoted David Hume and Adam Smith and cited John -Law's System of 1720 and the American experiences with paper money fifteen years -earlier as examples of how paper money schemes can go awry. - - -### Necker's plan and how it was tweaked - -Necker's original plan embodied two components: a national bank and a new -financial instrument, the ''assignat''. - - -Necker's national -bank was patterned after the Bank of England. He proposed to transform the *Caisse d'Escompte* into a national bank by granting it a monopoly on issuing -notes and marketing government debt. The *Caisse* was a -discount bank founded in 1776 whose main function was to discount commercial bills -and issue convertible notes. Although independent of the government in principle, -it had occasionally been used as a source of loans. Its notes had been declared -inconvertible in August 1788, and by the time of Necker's proposal, its reserves -were exhausted. Necker's plan placed the National Estates (as the Church lands -became known after the addition of the royal demesne) at the center of the financial -picture: a ''Bank of France'' would issue a $5\%$ security mortgaged on the prospective -receipts from the modest sale of some 400 millions' worth of National Estates in -the years 1791 to 1793. -```{note} - Only 170 million was to be used initially -to cover the deficits of 1789 and 1790. -``` - - -By mid-1790, members of the National Assembly had agreed to sell the National -Estates and to use the proceeds to service the debt in a ``tax-backed money'' scheme -```{note} -Debt service costs absorbed - over 60\% of French government expenditures. -``` - -The government would issue securities with which it would reimburse debt. - -The securities -were acceptable as payment for National Estates purchased at auctions; once received -in payment, they were to be burned. - -```{note} -The appendix to {cite}`sargent_velde1995` describes the -auction rules in detail. -``` -The Estates available for sale were thought to be worth about 2,400 -million, while the exactable debt (essentially fixed-term loans, unpaid arrears, -and liquidated offices) stood at about 2,000 million. The value of the land was -sufficient to let the Assembly retire all of the exactable debt and thereby eliminate -the interest payments on it. After lengthy debates, in August 1790, the Assembly set the denomination -and interest rate structure of the debt. - - -```{note} Two distinct -aspects of monetary theory help in thinking about the assignat plan. First, a system -beginning with a commodity standard typically has room for a once-and-for-all emission -of (an unbacked) paper currency that can replace the commodity money without generating -inflation. \citet{Sargent/Wallace:1983} describe models with this property. That -commodity money systems are wasteful underlies Milton Friedman's (1960) TOM:ADD REFERENCE preference -for a fiat money regime over a commodity money. Second, in a small country on a -commodity money system that starts with restrictions on intermediation, those restrictions -can be relaxed by letting the government issue bank notes on the security of safe -private indebtedness, while leaving bank notes convertible into gold at par. See -Adam Smith and Sargent and Wallace (1982) for expressions of this idea. TOM: ADD REFERENCES HEREAND IN BIBTEX FILE. 
-```
-
-
-```{note}
-The
-National Assembly debated many now classic questions in monetary economics. Under
-what conditions would money creation generate inflation, with what consequences
-for business conditions? Distinctions were made between issue of money to pay off
-debt, on one hand, and monetization of deficits, on the other. Would *assignats* be akin
-to notes emitted under a real bills regime, and cause loss of specie, or would
-they circulate alongside specie, thus increasing the money stock? Would inflation
-affect real wages? How would it impact foreign trade, competitiveness of French
-industry and agriculture, balance of trade, foreign exchange?
-```
diff --git a/lectures/greek_square.md b/lectures/greek_square.md
index d7c13887..8bf4a982 100644
--- a/lectures/greek_square.md
+++ b/lectures/greek_square.md
@@ -4,25 +4,39 @@ jupytext:
     extension: .md
     format_name: myst
     format_version: 0.13
-    jupytext_version: 1.14.4
+    jupytext_version: 1.16.1
 kernelspec:
   display_name: Python 3 (ipykernel)
   language: python
   name: python3
 ---
 
-+++ {"user_expressions": []}
-
 # Computing Square Roots
 
 ## Introduction
 
-This lectures provides an example of **invariant subspace** methods for analyzing linear difference equations.
+Chapter 24 of {cite}`russell2004history` about early Greek mathematics and astronomy contains this
+fascinating passage:
+
+ ```{epigraph}
+ The square root of 2, which was the first irrational to be discovered, was known to the early Pythagoreans, and ingenious methods of approximating to its value were discovered. The best was as follows: Form two columns of numbers, which we will call the $a$'s and the $b$'s; each starts with a $1$. The next $a$, at each stage, is formed by adding the last $a$ and the $b$ already obtained; the next $b$ is formed by adding twice the previous $a$ to the previous $b$. The first 6 pairs so obtained are $(1,1), (2,3), (5,7), (12,17), (29,41), (70,99)$. In each pair, $2 a^2 - b^2$ is $1$ or $-1$. Thus $b/a$ is nearly the square root of two, and at each fresh step it gets nearer. For instance, the reader may satisfy himself that the square of $99/70$ is very nearly equal to $2$.
+ ```
+
+This lecture drills down and studies this ancient method for computing square roots by using some of the matrix algebra that we've learned in earlier quantecon lectures.
+
+In particular, this lecture can be viewed as a sequel to {doc}`eigen_I`.
+
+It provides an example of how eigenvectors isolate *invariant subspaces* that help construct and analyze solutions of linear difference equations.
 
-These methods are applied throughout applied economic dynamics, for example, in this QuantEcon lecture {doc}`money financed government deficits and inflation `
+When vector $x_t$ starts in an invariant subspace, iterating the difference equation keeps $x_{t+j}$
+in that subspace for all $j \geq 1$.
 
-Our approach in this lecture is to illustrate the method with an ancient example, one that ancient Greek mathematicians used to compute square roots of positive integers.
+Invariant subspace methods are used throughout applied economic dynamics, for example, in the lecture {doc}`money_inflation`.
+
+Our approach here is to illustrate the method with an ancient example, one that ancient Greek mathematicians used to compute square roots of positive integers.
+
+## Perfect squares and irrational numbers
 
 An integer is called a **perfect square** if its square root is also an integer.
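To make Russell's recipe concrete before the lecture formalizes it, here is a minimal editorial sketch (not part of the PR's own code; the loop length and the names `a`, `b`, `pairs` are arbitrary choices) that generates the $(a, b)$ pairs from the passage and checks that $b/a$ approaches $\sqrt{2}$.

```{code-cell} ipython3
# Russell's two-column recipe: next a = a + b, next b = 2a + b
a, b = 1, 1
pairs = [(a, b)]
for _ in range(5):
    a, b = a + b, 2 * a + b
    pairs.append((a, b))

print(pairs)                                # (1, 1), (2, 3), ..., (70, 99)
print([2 * a**2 - b**2 for a, b in pairs])  # alternates between 1 and -1
print(pairs[-1][1] / pairs[-1][0])          # 99/70 ≈ 1.41429, close to sqrt(2)
```

The same recursion reappears in the exercise at the end of the lecture, where it is recast as a first-order vector difference equation.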
@@ -38,19 +52,22 @@ The ancient Greeks invented an algorithm to compute square roots of integers, in Their method involved - * computing a particular sequence of integers $\{y_t\}_{t=0}^\infty$ + * computing a particular sequence of integers $\{y_t\}_{t=0}^\infty$; - * computing $\lim_{t \rightarrow \infty} \left(\frac{y_{t+1}}{y_t}\right) = \bar r$ + * computing $\lim_{t \rightarrow \infty} \left(\frac{y_{t+1}}{y_t}\right) = \bar r$; - * deducing the desired square root from $\bar r$ + * deducing the desired square root from $\bar r$. In this lecture, we'll describe this method. We'll also use invariant subspaces to describe variations on this method that are faster. -## Primer on second order linear difference equation +## Second-order linear difference equations -Consider the following second-order linear difference equation +Before telling how the ancient Greeks computed square roots, we'll provide a quick introduction +to second-order linear difference equations. + +We'll study the following second-order linear difference equation $$ y_t = a_1 y_{t-1} + a_2 y_{t-2}, \quad t \geq 0 @@ -58,13 +75,31 @@ $$ (eq:2diff1) where $(y_{-1}, y_{-2})$ is a pair of given initial conditions. -We want to find expressions for $y_t, t \geq 0$ as functions of the initial conditions $(y_{-1}, y_{-2})$: +Equation {eq}`eq:2diff1` is actually an infinite number of linear equations in the sequence +$\{y_t\}_{t=0}^\infty$. + +There is one equation each for $t = 0, 1, 2, \ldots$. + +We could follow an approach taken in the lecture on {doc}`present values` and stack all of these equations into a single matrix equation that we would then solve by using matrix inversion. + +```{note} +In the present instance, the matrix equation would multiply a countably infinite dimensional square matrix by a countably infinite dimensional vector. With some qualifications, matrix multiplication and inversion tools apply to such an equation. +``` + +But we won't pursue that approach here. + + +Instead, we'll seek to find a time-invariant function that *solves* our difference equation, meaning +that it provides a formula for a $\{y_t\}_{t=0}^\infty$ sequence that satisfies +equation {eq}`eq:2diff1` for each $t \geq 0$. + +We seek an expression for $y_t, t \geq 0$ as functions of the initial conditions $(y_{-1}, y_{-2})$: $$ -y_t = g((y_{-1}, y_{-2});t), \quad t \geq 0 +y_t = g((y_{-1}, y_{-2});t), \quad t \geq 0. $$ (eq:2diff2) -We call such a function $g$ a **solution** of the difference equation {eq}`eq:2diff1`. +We call such a function $g$ a *solution* of the difference equation {eq}`eq:2diff1`. One way to discover a solution is to use a guess and verify method. @@ -81,7 +116,7 @@ For initial condition that satisfy {eq}`eq:2diff3` equation {eq}`eq:2diff1` impllies that $$ -y_0 = \left(a_1 + \frac{a_2}{\delta}\right) y_{-1} +y_0 = \left(a_1 + \frac{a_2}{\delta}\right) y_{-1}. $$ (eq:2diff4) We want @@ -90,16 +125,16 @@ $$ \left(a_1 + \frac{a_2}{\delta}\right) = \delta $$ (eq:2diff5) -which we can rewrite as the **characteristic equation** +which we can rewrite as the *characteristic equation* $$ -\delta^2 - a_1 \delta - a_2 = 0 +\delta^2 - a_1 \delta - a_2 = 0. $$ (eq:2diff6) Applying the quadratic formula to solve for the roots of {eq}`eq:2diff6` we find that $$ -\delta = \frac{ a_1 \pm \sqrt{a_1^2 + 4 a_2}}{2} +\delta = \frac{ a_1 \pm \sqrt{a_1^2 + 4 a_2}}{2}. 
$$ (eq:2diff7) For either of the two $\delta$'s that satisfy equation {eq}`eq:2diff7`, @@ -109,9 +144,13 @@ $$ y_t = \delta^t y_0 , \forall t \geq 0 $$ (eq:2diff8) -and $y_0 = a_1 y_{-1} + a_2 y_{-2}$ +provided that we set -The **general** solution of difference equation {eq}`eq:2diff1` takes the form +$$ +y_0 = \delta y_{-1} . +$$ + +The *general* solution of difference equation {eq}`eq:2diff1` takes the form $$ y_t = \eta_1 \delta_1^t + \eta_2 \delta_2^t @@ -137,33 +176,34 @@ If we choose $(y_{-1}, y_{-2})$ to set $(\eta_1, \eta_2) = (1, 0)$, then $y_t = If we choose $(y_{-1}, y_{-2})$ to set $(\eta_1, \eta_2) = (0, 1)$, then $y_t = \delta_2^t$ for all $t \geq 0$. +Soon we'll relate the preceding calculations to components an eigen decomposition of a transition matrix that represents difference equation {eq}`eq:2diff1` in a very convenient way. + +We'll turn to that after we describe how Ancient Greeks figured out how to compute square roots of positive integers that are not perfect squares. + -## Setup +## Algorithm of the Ancient Greeks -Let $\sigma$ be a positive integer greater than $1$ +Let $\sigma$ be a positive integer greater than $1$. -So $\sigma \in {\mathcal I} \equiv \{2, 3, \ldots \}$ +So $\sigma \in {\mathcal I} \equiv \{2, 3, \ldots \}$. We want an algorithm to compute the square root of $\sigma \in {\mathcal I}$. -If $\sqrt{\sigma} \in {\mathcal I}$, $\sigma $ is said to be a **perfect square**. +If $\sqrt{\sigma} \in {\mathcal I}$, $\sigma $ is said to be a *perfect square*. If $\sqrt{\sigma} \not\in {\mathcal I}$, it turns out that it is irrational. Ancient Greeks used a recursive algorithm to compute square roots of integers that are not perfect squares. -The algorithm iterates on a second order linear difference equation in the sequence $\{y_t\}_{t=0}^\infty$: +The algorithm iterates on a second-order linear difference equation in the sequence $\{y_t\}_{t=0}^\infty$: $$ y_{t} = 2 y_{t-1} - (1 - \sigma) y_{t-2}, \quad t \geq 0 $$ (eq:second_order) -together with a pair of integers that are initial conditions for $y_{-1}, y_{-2}$. - -First, we'll deploy some techniques for solving difference equations that are also deployed in this QuantEcon lecture about the multiplier-accelerator model: - - +together with a pair of integers that are initial conditions for $y_{-1}, y_{-2}$. +First, we'll deploy some techniques for solving the difference equations that are also deployed in {doc}`dynam:samuelson`. The characteristic equation associated with difference equation {eq}`eq:second_order` is @@ -171,11 +211,9 @@ $$ c(x) \equiv x^2 - 2 x + (1 - \sigma) = 0 $$ (eq:cha_eq0) -+++ +(Notice how this is an instance of equation {eq}`eq:2diff6` above.) -(This is an instance of equation {eq}`eq:2diff6` above.) - -If we factor the right side of the equation {eq}`eq:cha_eq0`, we obtain +Factoring the right side of equation {eq}`eq:cha_eq0`, we obtain $$ c(x)= (x - \lambda_1) (x-\lambda_2) = 0 @@ -197,11 +235,11 @@ By applying the quadratic formula to solve for the roots the characteristic equ {eq}`eq:cha_eq0`, we find that $$ -\lambda_1 = 1 + \sqrt{\sigma}, \quad \lambda_2 = 1 - \sqrt{\sigma} +\lambda_1 = 1 + \sqrt{\sigma}, \quad \lambda_2 = 1 - \sqrt{\sigma}. $$ (eq:secretweapon) -Formulas {eq}`eq:secretweapon` indicate that $\lambda_1$ and $\lambda_2$ are both simple functions -of a single variable, namely, $\sqrt{\sigma}$, the object that some Ancient Greeks wanted to compute. 
+Formulas {eq}`eq:secretweapon` indicate that $\lambda_1$ and $\lambda_2$ are each functions +of a single variable, namely, $\sqrt{\sigma}$, the object that we along with some Ancient Greeks want to compute. Ancient Greeks had an indirect way of exploiting this fact to compute square roots of a positive integer. @@ -214,7 +252,7 @@ $$ y_t = \lambda_1^t \eta_1 + \lambda_2^t \eta_2 $$ -where $\eta_1$ and $\eta_2$ are chosen to satisfy the prescribed initial conditions $y_{-1}, y_{-2}$: +where $\eta_1$ and $\eta_2$ are chosen to satisfy prescribed initial conditions $y_{-1}, y_{-2}$: $$ \begin{aligned} @@ -225,17 +263,17 @@ $$(eq:leq_sq) System {eq}`eq:leq_sq` of simultaneous linear equations will play a big role in the remainder of this lecture. -Since $\lambda_1 = 1 + \sqrt{\sigma} > 1 > \lambda_2 = 1 - \sqrt{\sigma} $ -it follows that for **almost all** (but not all) initial conditions +Since $\lambda_1 = 1 + \sqrt{\sigma} > 1 > \lambda_2 = 1 - \sqrt{\sigma} $, +it follows that for *almost all* (but not all) initial conditions $$ -\lim_{t \rightarrow \infty} \left(\frac{y_{t+1}}{y_t}\right) = 1 + \sqrt{\sigma} +\lim_{t \rightarrow \infty} \left(\frac{y_{t+1}}{y_t}\right) = 1 + \sqrt{\sigma}. $$ Thus, $$ -\sqrt{\sigma} = \lim_{t \rightarrow \infty} \left(\frac{y_{t+1}}{y_t}\right) - 1 +\sqrt{\sigma} = \lim_{t \rightarrow \infty} \left(\frac{y_{t+1}}{y_t}\right) - 1. $$ However, notice that if $\eta_1 = 0$, then @@ -247,7 +285,7 @@ $$ so that $$ -\sqrt{\sigma} = 1 - \lim_{t \rightarrow \infty} \left(\frac{y_{t+1}}{y_t}\right) +\sqrt{\sigma} = 1 - \lim_{t \rightarrow \infty} \left(\frac{y_{t+1}}{y_t}\right). $$ Actually, if $\eta_1 =0$, it follows that @@ -270,40 +308,38 @@ so again, convergence is immediate, and we have no need to compute a limit. System {eq}`eq:leq_sq` of simultaneous linear equations can be used in various ways. - * we can take $y_{-1}, y_{-2}$ as given initial conditions and solve for $\eta_1, \eta_2$ + * we can take $y_{-1}, y_{-2}$ as given initial conditions and solve for $\eta_1, \eta_2$; - * we can instead take $\eta_1, \eta_2$ as given and solve for initial conditions $y_{-1}, y_{-2}$ + * we can instead take $\eta_1, \eta_2$ as given and solve for initial conditions $y_{-1}, y_{-2}$. Notice how we used the second approach above when we set $\eta_1, \eta_2$ either to $(0, 1)$, for example, or $(1, 0)$, for example. -In taking this second approach, we were in effect finding an **invariant subspace** of ${\bf R}^2$. +In taking this second approach, we constructed an *invariant subspace* of ${\bf R}^2$. Here is what is going on. -For $ t \geq 0$ and for most pairs of initial conditions $(y_{-1}, y_{-2}) \in {\bf R}^2$ for equation {eq}`eq:second_order', $y_t$ can be expressed as a linear combination of $y_{t-1}$ and $y_{t-2}$. +For $ t \geq 0$ and for most pairs of initial conditions $(y_{-1}, y_{-2}) \in {\bf R}^2$ for equation {eq}`eq:second_order`, $y_t$ can be expressed as a linear combination of $y_{t-1}$ and $y_{t-2}$. But for some special initial conditions $(y_{-1}, y_{-2}) \in {\bf R}^2$, $y_t$ can be expressed as a linear function of $y_{t-1}$ only. These special initial conditions require that $y_{-1}$ be a linear function of $y_{-2}$. -We'll study these special initial conditions soon. But first let's write some Python code to iterate on equation {eq}`eq:second_order` starting from an arbitrary $(y_{-1}, y_{-2}) \in {\bf R}^2$. +We'll study these special initial conditions soon. 
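As a small editorial complement to system {eq}`eq:leq_sq` (not part of the PR), the sketch below uses the system in the first of the two ways just listed: given $(y_{-1}, y_{-2})$, it solves for $(\eta_1, \eta_2)$ the two equations obtained by evaluating the general solution $y_t = \eta_1 \lambda_1^t + \eta_2 \lambda_2^t$ at $t = -1$ and $t = -2$, and then checks the closed form against direct iteration. The value $\sigma = 2$ and the initial conditions are illustrative assumptions, and the import is included so the snippet is self-contained.

```{code-cell} ipython3
import numpy as np

σ = 2
λ_1, λ_2 = 1 + np.sqrt(σ), 1 - np.sqrt(σ)

# Illustrative initial conditions y_{-1}, y_{-2}
y_m1, y_m2 = 2.0, 1.0

# Solve the 2 x 2 system for η_1, η_2
A = np.array([[λ_1**(-1), λ_2**(-1)],
              [λ_1**(-2), λ_2**(-2)]])
η_1, η_2 = np.linalg.solve(A, np.array([y_m1, y_m2]))

# Check: iterating y_t = 2 y_{t-1} - (1 - σ) y_{t-2} reproduces the closed form
y_prev2, y_prev1 = y_m2, y_m1
for t in range(5):
    y_t = 2 * y_prev1 - (1 - σ) * y_prev2
    print(t, y_t, η_1 * λ_1**t + η_2 * λ_2**t)
    y_prev2, y_prev1 = y_prev1, y_t
```

For generic initial conditions both $\eta$'s are nonzero, which is why the ratio $y_{t+1}/y_t$ approaches $\lambda_1 = 1 + \sqrt{\sigma}$ only in the limit.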
+
+But first let's write some Python code to iterate on equation {eq}`eq:second_order` starting from an arbitrary $(y_{-1}, y_{-2}) \in {\bf R}^2$.
 
 ## Implementation
 
 We now implement the above algorithm to compute the square root of $\sigma$.
 
-
 In this lecture, we use the following import:
 
 ```{code-cell} ipython3
-:tags: []
-
 import numpy as np
+import matplotlib.pyplot as plt
 ```
 
 ```{code-cell} ipython3
-:tags: []
-
 def solve_λs(coefs):
     # Calculate the roots using numpy.roots
     λs = np.roots(coefs)
@@ -359,16 +395,12 @@ print(f"sqrt({σ}) is approximately {sqrt_σ:.5f} (error: {dev:.5f})")
 
 Now we consider cases where $(\eta_1, \eta_2) = (0, 1)$ and $(\eta_1, \eta_2) = (1, 0)$
 
 ```{code-cell} ipython3
-:tags: []
-
 # Compute λ_1, λ_2
 λ_1, λ_2 = solve_λs(coefs)
 print(f'Roots for the characteristic equation are ({λ_1:.5f}, {λ_2:.5f}))')
 ```
 
 ```{code-cell} ipython3
-:tags: []
-
 # Case 1: η_1, η_2 = (0, 1)
 ηs = (0, 1)
 
@@ -380,9 +412,7 @@ print(f"For η_1, η_2 = (0, 1), sqrt_σ = {sqrt_σ:.5f}")
 ```
 
 ```{code-cell} ipython3
-:tags: []
-
-# Case 2: η_1, η_2 = (0, 1)
+# Case 2: η_1, η_2 = (1, 0)
 ηs = (1, 0)
 
 sqrt_σ = y(1, ηs) / y(0, ηs) - 1
@@ -391,9 +421,7 @@ print(f"For η_1, η_2 = (1, 0), sqrt_σ = {sqrt_σ:.5f}")
 
 We find that convergence is immediate.
 
-+++
-
-Let's represent the preceding analysis by vectorizing our second order difference equation {eq}`eq:second_order` and then using eigendecompositions of a state transition matrix.
+Next, we'll represent the preceding analysis by first vectorizing our second-order difference equation {eq}`eq:second_order` and then using eigendecompositions of an associated state transition matrix.
 
 ## Vectorizing the difference equation
 
@@ -433,13 +461,112 @@ $$
 x_{t+1} = V \Lambda V^{-1} x_t
 $$
 
-Define
+Now we implement the algorithm above.
+
+First we write a function that iterates $M$.
+
+```{code-cell} ipython3
+def iterate_M(x_0, M, num_steps, dtype=np.float64):
+
+    # Eigendecomposition of M
+    Λ, V = np.linalg.eig(M)
+    V_inv = np.linalg.inv(V)
+
+    # Initialize the array to store results
+    xs = np.zeros((x_0.shape[0],
+                   num_steps + 1))
+
+    # Perform the iterations
+    xs[:, 0] = x_0
+    for t in range(num_steps):
+        xs[:, t + 1] = M @ xs[:, t]
+
+    return xs, Λ, V, V_inv
+
+# Define the state transition matrix M
+M = np.array([
+      [2, -(1 - σ)],
+      [1, 0]])
+
+# Initial condition vector x_0
+x_0 = np.array([2, 2])
+
+# Perform the iteration
+xs, Λ, V, V_inv = iterate_M(x_0, M, num_steps=100)
+
+print(f"eigenvalues:\n{Λ}")
+print(f"eigenvectors:\n{V}")
+print(f"inverse eigenvectors:\n{V_inv}")
+```
+
+Let's compare the eigenvalues to the roots {eq}`eq:secretweapon` of equation
+{eq}`eq:cha_eq0` that we computed above.
+
+```{code-cell} ipython3
+roots = solve_λs((1, -2, (1 - σ)))
+print(f"roots: {np.round(roots, 8)}")
+```
+
+Hence we confirmed {eq}`eq:eigen_sqrt`.
+
+Information about the square root we are after is also contained
+in the two eigenvectors.
+
+Indeed, each eigenvector spans a one-dimensional *invariant subspace* of ${\mathbb R}^2$ pinned down by dynamics of the form
+
+$$
+y_{t} = \lambda_i y_{t-1}, \quad i = 1, 2
+$$ (eq:invariantsub101)
+
+that we encountered in equation {eq}`eq:2diff8` above.
+
+In equation {eq}`eq:invariantsub101`, the $i$th $\lambda_i$ equals the ratio $V_{1,i}/V_{2,i}$ of the components of the $i$th eigenvector.
+
+The following graph verifies this for our example.
+ +```{code-cell} ipython3 +:tags: [hide-input] + +# Plotting the eigenvectors +plt.figure(figsize=(8, 8)) + +plt.quiver(0, 0, V[0, 0], V[1, 0], angles='xy', scale_units='xy', + scale=1, color='C0', label=fr'$\lambda_1={np.round(Λ[0], 4)}$') +plt.quiver(0, 0, V[0, 1], V[1, 1], angles='xy', scale_units='xy', + scale=1, color='C1', label=fr'$\lambda_2={np.round(Λ[1], 4)}$') + +# Annotating the slopes +plt.text(V[0, 0]-0.5, V[1, 0]*1.2, + r'slope=$\frac{V_{1,1}}{V_{1,2}}=$'+f'{np.round(V[0, 0] / V[1, 0], 4)}', + fontsize=12, color='C0') +plt.text(V[0, 1]-0.5, V[1, 1]*1.2, + r'slope=$\frac{V_{2,1}}{V_{2,2}}=$'+f'{np.round(V[0, 1] / V[1, 1], 4)}', + fontsize=12, color='C1') + +# Adding labels +plt.axhline(0, color='grey', linewidth=0.5, alpha=0.4) +plt.axvline(0, color='grey', linewidth=0.5, alpha=0.4) +plt.legend() + +plt.xlim(-1.5, 1.5) +plt.ylim(-1.5, 1.5) +plt.show() +``` + +## Invariant subspace approach + +The preceding calculation indicates that we can use the eigenvectors $V$ to construct 2-dimensional *invariant subspaces*. + +We'll pursue that possibility now. + +Define the transformed variables + $$ x_t^* = V^{-1} x_t $$ -We can recover $x_t$ from $x_t^*$: +Evidently, we can recover $x_t$ from $x_t^*$: $$ x_t = V x_t^* @@ -453,23 +580,21 @@ Let $$ V = \begin{bmatrix} V_{1,1} & V_{1,2} \cr - V_{2,2} & V_{2,2} \end{bmatrix}, \quad + V_{2,1} & V_{2,2} \end{bmatrix}, \quad V^{-1} = \begin{bmatrix} V^{1,1} & V^{1,2} \cr - V^{2,2} & V^{2,2} \end{bmatrix} + V^{2,1} & V^{2,2} \end{bmatrix} $$ Notice that it follows from $$ \begin{bmatrix} V^{1,1} & V^{1,2} \cr - V^{2,2} & V^{2,2} \end{bmatrix} \begin{bmatrix} V_{1,1} & V_{1,2} \cr - V_{2,2} & V_{2,2} \end{bmatrix} = \begin{bmatrix} 1 & 0 \cr 0 & 1 \end{bmatrix} + V^{2,1} & V^{2,2} \end{bmatrix} \begin{bmatrix} V_{1,1} & V_{1,2} \cr + V_{2,1} & V_{2,2} \end{bmatrix} = \begin{bmatrix} 1 & 0 \cr 0 & 1 \end{bmatrix} $$ that - - $$ V^{2,1} V_{1,1} + V^{2,2} V_{2,1} = 0 $$ @@ -477,7 +602,7 @@ $$ and $$ -V^{1,1}V_{1,2} + V^{1,2} V_{2,2} = 0 +V^{1,1}V_{1,2} + V^{1,2} V_{2,2} = 0. $$ These equations will be very useful soon. @@ -493,14 +618,14 @@ $$ To deactivate $\lambda_1$ we want to set $$ -x_{1,0}^* = 0 +x_{1,0}^* = 0. $$ This can be achieved by setting $$ -x_{2,0} = -( V^{1,2})^{-1} V^{1,1} = V_{2,1} V_{1,1}^{-1} x_{1,0}. +x_{2,0} = -( V^{1,2})^{-1} V^{1,1} x_{1,0} = V_{2,2} V_{1,2}^{-1} x_{1,0}. $$ (eq:deactivate1) To deactivate $\lambda_2$, we want to set @@ -512,58 +637,164 @@ $$ This can be achieved by setting $$ -x_{2,0} = -(V^{2,2})^{-1} V^{2,1} = V_{2,1} V_{1,1}^{-1} x_{1,0} +x_{2,0} = -(V^{2,2})^{-1} V^{2,1} x_{1,0} = V_{2,1} V_{1,1}^{-1} x_{1,0}. $$ (eq:deactivate2) +Let's verify {eq}`eq:deactivate1` and {eq}`eq:deactivate2` below -We shall encounter equations very similar to {eq}`eq:deactivate1` and {eq}`eq:deactivate2` -in this QuantEcon lecture {doc}`money financed government deficits and inflation ` -and in many other places in dynamic economic theory. +To deactivate $\lambda_1$ we use {eq}`eq:deactivate1` -### Implementation +```{code-cell} ipython3 +xd_1 = np.array((x_0[0], + V[1,1]/V[0,1] * x_0[0]), + dtype=np.float64) -Now we implement the algorithm above. +# Compute x_{1,0}^* +np.round(V_inv @ xd_1, 8) +``` -First we write a function that iterates $M$ +We find $x_{1,0}^* = 0$. 
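As an editorial aside, the deactivation formulas {eq}`eq:deactivate1` and {eq}`eq:deactivate2` rest on the fact that the transformed state $x_t^* = V^{-1} x_t$ obeys the decoupled dynamics $x_{t+1}^* = \Lambda x_t^*$. A minimal sketch of that claim, reusing the objects `M`, `Λ`, `V`, `V_inv`, and `xs` computed earlier in this lecture, is:

```{code-cell} ipython3
# V^{-1} M V is diagonal with the eigenvalues on the diagonal
print(np.allclose(V_inv @ M @ V, np.diag(Λ)))

# Hence x*_{t+1} = Λ x*_t along the first few iterates computed above
x_star = V_inv @ xs[:, :6]
print(np.allclose(x_star[:, 1:], np.diag(Λ) @ x_star[:, :-1]))
```

Both checks should print `True`.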
+ +Now we deactivate $\lambda_2$ using {eq}`eq:deactivate2` ```{code-cell} ipython3 -:tags: [] +xd_2 = np.array((x_0[0], + V[1,0]/V[0,0] * x_0[0]), + dtype=np.float64) -def iterate_M(x_0, M, num_steps): - # Eigendecomposition of M - Λ, V = np.linalg.eig(M) - V_inv = np.linalg.inv(V) - - print(f"eigenvalue:\n{Λ}") - print(f"eigenvector:\n{V}") - - # Initialize the array to store results - x = np.zeros((x_0.shape[0], num_steps)) - - # Perform the iterations - for t in range(num_steps): - x[:, t] = V @ np.diag(Λ**t) @ V_inv @ x_0 - - return x +# Compute x_{2,0}^* +np.round(V_inv @ xd_2, 8) +``` -# Define the state transition matrix M -M = np.array([[2, -(1 - σ)], - [1, 0]]) +We find $x_{2,0}^* = 0$. -# Initial condition vector x_0 -x_0 = np.array([1, 0]) +```{code-cell} ipython3 +# Simulate with muted λ1 λ2. +num_steps = 10 +xs_λ1 = iterate_M(xd_1, M, num_steps)[0] +xs_λ2 = iterate_M(xd_2, M, num_steps)[0] + +# Compute ratios y_t / y_{t-1} +ratios_λ1 = xs_λ1[1, 1:] / xs_λ1[1, :-1] +ratios_λ2 = xs_λ2[1, 1:] / xs_λ2[1, :-1] +``` -# Perform the iteration -xs = iterate_M(x_0, M, num_steps=100) +The following graph shows the ratios $y_t / y_{t-1}$ for the two cases. + +We find that the ratios converge to $\lambda_2$ in the first case and $\lambda_1$ in the second case. + +```{code-cell} ipython3 +:tags: [hide-input] + +# Plot the ratios for y_t / y_{t-1} +fig, axs = plt.subplots(1, 2, figsize=(12, 6), dpi=500) + +# First subplot +axs[0].plot(np.round(ratios_λ1, 6), + label=r'$\frac{y_t}{y_{t-1}}$', linewidth=3) +axs[0].axhline(y=Λ[1], color='red', linestyle='--', + label='$\lambda_2$', alpha=0.5) +axs[0].set_xlabel('t', size=18) +axs[0].set_ylabel(r'$\frac{y_t}{y_{t-1}}$', size=18) +axs[0].set_title(r'$\frac{y_t}{y_{t-1}}$ after Muting $\lambda_1$', + size=13) +axs[0].legend() + +# Second subplot +axs[1].plot(ratios_λ2, label=r'$\frac{y_t}{y_{t-1}}$', + linewidth=3) +axs[1].axhline(y=Λ[0], color='green', linestyle='--', + label='$\lambda_1$', alpha=0.5) +axs[1].set_xlabel('t', size=18) +axs[1].set_ylabel(r'$\frac{y_t}{y_{t-1}}$', size=18) +axs[1].set_title(r'$\frac{y_t}{y_{t-1}}$ after Muting $\lambda_2$', + size=13) +axs[1].legend() + +plt.tight_layout() +plt.show() ``` -Compare the eigenvector to the roots we obtained above +## Concluding remarks + +This lecture sets the stage for many other applications of the *invariant subspace* methods. + +All of these exploit very similar equations based on eigen decompositions. + +We shall encounter equations very similar to {eq}`eq:deactivate1` and {eq}`eq:deactivate2` +in {doc}`money_inflation` and in many other places in dynamic economic theory. + + +## Exercise + +```{exercise-start} +:label: greek_square_ex_a +``` +Please use matrix algebra to formulate the method described by Bertrand Russell at the beginning of this lecture. + +1. Define a state vector $x_t = \begin{bmatrix} a_t \cr b_t \end{bmatrix}$. +2. Formulate a first-order vector difference equation for $x_t$ of the form $x_{t+1} = A x_t$ and +compute the matrix $A$. +3. Use the system $x_{t+1} = A x_t$ to replicate the sequence of $a_t$'s and $b_t$'s described by Bertrand Russell. +4. Compute the eigenvectors and eigenvalues of $A$ and compare them to corresponding objects computed in the text of this lecture. + +```{exercise-end} +``` + +```{solution-start} greek_square_ex_a +:class: dropdown +``` + +Here is one soluition. 
+ +According to the quote, we can formulate + +$$ +\begin{aligned} +a_{t+1} &= a_t + b_t \\ +b_{t+1} &= 2a_t + b_t +\end{aligned} +$$ (eq:gs_ex1system) + +with $x_0 = \begin{bmatrix} a_0 \cr b_0 \end{bmatrix} = \begin{bmatrix} 1 \cr 1 \end{bmatrix}$ + +By {eq}`eq:gs_ex1system`, we can write matrix $A$ as + +$$ +A = \begin{bmatrix} 1 & 1 \cr + 2 & 1 \end{bmatrix} +$$ + +Then $x_{t+1} = A x_t$ for $t \in \{0, \dots, 5\}$ ```{code-cell} ipython3 -:tags: [] +# Define the matrix A +A = np.array([[1, 1], + [2, 1]]) -roots = solve_λs((1, -2, (1 - σ))) -print(f"roots: {np.round(roots, 8)}") +# Initial vector x_0 +x_0 = np.array([1, 1]) + +# Number of iterations +n = 6 + +# Generate the sequence +xs = np.array([x_0]) +x_t = x_0 +for _ in range(1, n): + x_t = A @ x_t + xs = np.vstack([xs, x_t]) + +# Print the sequence +for i, (a_t, b_t) in enumerate(xs): + print(f"Iter {i}: a_t = {a_t}, b_t = {b_t}") + +# Compute eigenvalues and eigenvectors of A +eigenvalues, eigenvectors = np.linalg.eig(A) + +print(f'\nEigenvalues:\n{eigenvalues}') +print(f'\nEigenvectors:\n{eigenvectors}') ``` -Hence we confirmed {eq}`eq:eigen_sqrt`. +```{solution-end} +``` diff --git a/lectures/heavy_tails.md b/lectures/heavy_tails.md index e4c32c6f..f5636fcc 100644 --- a/lectures/heavy_tails.md +++ b/lectures/heavy_tails.md @@ -45,7 +45,7 @@ In the natural sciences (and in more traditional economics courses), heavy-taile However, it turns out that heavy-tailed distributions play a crucial role in economics. -In fact many -- if not most -- of the important distributions in economics are heavy tailed. +In fact many -- if not most -- of the important distributions in economics are heavy-tailed. In this lecture we explain what heavy tails are and why they are -- or at least why they should be -- central to economic analysis. @@ -58,6 +58,9 @@ the natural sciences have "light tails." To explain this concept, let's look first at examples. +```{prf:example} +:label: ht_ex_nd + The classic example is the [normal distribution](https://en.wikipedia.org/wiki/Normal_distribution), which has density $$ @@ -73,6 +76,7 @@ respectively. As $x$ deviates from $\mu$, the value of $f(x)$ goes to zero extremely quickly. +``` We can see this when we plot the density and show a histogram of observations, as with the following code (which assumes $\mu=0$ and $\sigma=1$). @@ -277,6 +281,9 @@ The data we have just seen is said to be "heavy-tailed". With heavy-tailed distributions, extreme outcomes occur relatively frequently. +```{prf:example} +:label: ht_ex_od + Importantly, there are many examples of heavy-tailed distributions observed in economic and financial settings! @@ -292,6 +299,7 @@ The firm size distribution is also heavy-tailed The distribution of town and city sizes is heavy-tailed * Most towns and cities are small but some are very large. +``` Later in this lecture, we examine heavy tails in these distributions. @@ -828,7 +836,7 @@ plt.show() ### City size -Here are plots of the city size distribution for the US and Brazil in 2023 from world population review. +Here are plots of the city size distribution for the US and Brazil in 2023 from the World Population Review. The size is measured by population. @@ -943,7 +951,7 @@ One impact of heavy tails is that sample averages can be poor estimators of the underlying mean of the distribution. 
To understand this point better, recall {doc}`our earlier discussion ` -of the Law of Large Numbers, which considered IID $X_1, \ldots, X_n$ with common distribution $F$ +of the law of large numbers, which considered IID $X_1, \ldots, X_n$ with common distribution $F$ If $\mathbb E |X_i|$ is finite, then the sample mean $\bar X_n := \frac{1}{n} \sum_{i=1}^n X_i$ satisfies @@ -957,7 +965,7 @@ the sample mean $\bar X_n := \frac{1}{n} \sum_{i=1}^n X_i$ satisfies where $\mu := \mathbb E X_i = \int x F(dx)$ is the common mean of the sample. The condition $\mathbb E | X_i | = \int |x| F(dx) < \infty$ holds -in most cases but can fail if the distribution $F$ is very heavy tailed. +in most cases but can fail if the distribution $F$ is very heavy-tailed. For example, it fails for the Cauchy distribution. @@ -1006,7 +1014,7 @@ We return to this point in the exercises. We have now seen that 1. heavy tails are frequent in economics and -2. the Law of Large Numbers fails when tails are very heavy. +2. the law of large numbers fails when tails are very heavy. But what about in the real world? Do heavy tails matter? @@ -1261,7 +1269,7 @@ Present discounted value of tax revenue will be estimated by The Pareto distribution is assumed to take the form {eq}`pareto` with $\bar x = 1$ and $\alpha = 1.05$. -(The value the tail index $\alpha$ is plausible given the data {cite}`gabaix2016power`.) +(The value of the tail index $\alpha$ is plausible given the data {cite}`gabaix2016power`.) To make the lognormal option as similar as possible to the Pareto option, choose its parameters such that the mean and median of both distributions are the same. @@ -1315,7 +1323,7 @@ $$ which we solve for $\mu$ and $\sigma$ given $\alpha = 1.05$. -Here is code that generates the two samples, produces the violin plot and +Here is the code that generates the two samples, produces the violin plot and prints the mean and standard deviation of the two samples. ```{code-cell} ipython3 diff --git a/lectures/inequality.md b/lectures/inequality.md index f6c0ff67..b1ec0e11 100644 --- a/lectures/inequality.md +++ b/lectures/inequality.md @@ -18,19 +18,23 @@ kernelspec: In the lecture {doc}`long_run_growth` we studied how GDP per capita has changed for certain countries and regions. -Per capital GDP is important because it gives us an idea of average income for +Per capita GDP is important because it gives us an idea of average income for households in a given country. However, when we study income and wealth, averages are only part of the story. +```{prf:example} +:label: ie_ex_av + For example, imagine two societies, each with one million people, where * in the first society, the yearly income of one man is $100,000,000 and the income of the - others is zero + others are zero * in the second society, the yearly income of everyone is $100 These countries have the same income per capita (average income is $100) but the lives of the people will be very different (e.g., almost everyone in the first society is starving, even though one person is fabulously rich). +``` The example above suggests that we should go beyond simple averages when we study income and wealth. @@ -247,7 +251,7 @@ The following code block imports a subset of the dataset `SCF_plus` for 2016, which is derived from the [Survey of Consumer Finances](https://en.wikipedia.org/wiki/Survey_of_Consumer_Finances) (SCF). 
```{code-cell} ipython3 -url = 'https://media.githubusercontent.com/media/QuantEcon/high_dim_data/main/SCF_plus/SCF_plus_mini.csv' +url = 'https://github.com/QuantEcon/high_dim_data/raw/main/SCF_plus/SCF_plus_mini.csv' df = pd.read_csv(url) df_income_wealth = df.dropna() ``` @@ -435,6 +439,8 @@ Let's examine the Gini coefficient in some simulations. The code below computes the Gini coefficient from a sample. +(code:gini-coefficient)= + ```{code-cell} ipython3 def gini_coefficient(y): @@ -481,6 +487,7 @@ You can check this by looking up the expression for the mean of a lognormal distribution. ```{code-cell} ipython3 +%%time k = 5 σ_vals = np.linspace(0.2, 4, k) n = 2_000 @@ -529,7 +536,7 @@ Let's look at the Gini coefficient for the distribution of income in the US. We will get pre-computed Gini coefficients (based on income) from the World Bank using the [wbgapi](https://blogs.worldbank.org/opendata/introducing-wbgapi-new-python-package-accessing-world-bank-data). -Let's use the `wbgapi` package we imported earlier to search the world bank data for Gini to find the Series ID. +Let's use the `wbgapi` package we imported earlier to search the World Bank data for Gini to find the Series ID. ```{code-cell} ipython3 wb.search("gini") @@ -616,51 +623,11 @@ We will use US data from the {ref}`Survey of Consumer Finances +[This notebook](https://github.com/QuantEcon/lecture-python-intro/tree/main/lectures/_static/lecture_specific/inequality/data.ipynb) can be used to compute this information over the full dataset. ```{code-cell} ipython3 -ginis = pd.read_csv("_static/lecture_specific/inequality/usa-gini-nwealth-tincome-lincome.csv", index_col='year') +data_url = 'https://github.com/QuantEcon/lecture-python-intro/raw/main/lectures/_static/lecture_specific/inequality/usa-gini-nwealth-tincome-lincome.csv' +ginis = pd.read_csv(data_url, index_col='year') ginis.head(n=5) ``` @@ -687,10 +654,6 @@ One possibility is that this change is mainly driven by technology. However, we will see below that not all advanced economies experienced similar growth of inequality. - - - - ### Cross-country comparisons of income inequality Earlier in this lecture we used `wbgapi` to get Gini data across many countries @@ -796,8 +759,9 @@ min_year = plot_data.year.min() max_year = plot_data.year.max() ``` -The time series for all three countries start and stop in different years. We will add a year mask to the data to -improve clarity in the chart including the different end years associated with each countries time series. +The time series for all three countries start and stop in different years. + +We will add a year mask to the data to improve clarity in the chart including the different end years associated with each country's time series. ```{code-cell} ipython3 labels = [1979, 1986, 1991, 1995, 2000, 2020, 2021, 2022] + \ @@ -824,7 +788,7 @@ fig.show() This figure is built using `plotly` and is {ref}` available on the website ` ``` -This plot shows that all three Western economies GDP per capita has grown over +This plot shows that all three Western economies' GDP per capita has grown over time with some fluctuations in the Gini coefficient. From the early 80's the United Kingdom and the US economies both saw increases @@ -1093,3 +1057,90 @@ plt.show() ```{solution-end} ``` + +```{exercise} +:label: inequality_ex3 + +The {ref}`code to compute the Gini coefficient is listed in the lecture above `. + +This code uses loops to calculate the coefficient based on income or wealth data. 
+ +This function can be re-written using vectorization which will greatly improve the computational efficiency when using `python`. + +Re-write the function `gini_coefficient` using `numpy` and vectorized code. + +You can compare the output of this new function with the one above, and note the speed differences. +``` + +```{solution-start} inequality_ex3 +:class: dropdown +``` + +Let's take a look at some raw data for the US that is stored in `df_income_wealth` + +```{code-cell} ipython3 +df_income_wealth.describe() +``` + +```{code-cell} ipython3 +df_income_wealth.head(n=4) +``` + +We will focus on wealth variable `n_wealth` to compute a Gini coefficient for the year 2016. + +```{code-cell} ipython3 +data = df_income_wealth[df_income_wealth.year == 2016].sample(3000, random_state=1) +``` + +```{code-cell} ipython3 +data.head(n=2) +``` + +We can first compute the Gini coefficient using the function defined in the lecture above. + +```{code-cell} ipython3 +gini_coefficient(data.n_wealth.values) +``` + +Now we can write a vectorized version using `numpy` + +```{code-cell} ipython3 +def gini(y): + n = len(y) + y_1 = np.reshape(y, (n, 1)) + y_2 = np.reshape(y, (1, n)) + g_sum = np.sum(np.abs(y_1 - y_2)) + return g_sum / (2 * n * np.sum(y)) +``` +```{code-cell} ipython3 +gini(data.n_wealth.values) +``` +Let's simulate five populations by drawing from a lognormal distribution as before + +```{code-cell} ipython3 +k = 5 +σ_vals = np.linspace(0.2, 4, k) +n = 2_000 +σ_vals = σ_vals.reshape((k,1)) +μ_vals = -σ_vals**2/2 +y_vals = np.exp(μ_vals + σ_vals*np.random.randn(n)) +``` +We can compute the Gini coefficient for these five populations using the vectorized function, the computation time is shown below: + +```{code-cell} ipython3 +%%time +gini_coefficients =[] +for i in range(k): + gini_coefficients.append(gini(y_vals[i])) +``` +This shows the vectorized function is much faster. +This gives us the Gini coefficients for these five households. + +```{code-cell} ipython3 +gini_coefficients +``` +```{solution-end} +``` + + + diff --git a/lectures/inflation_history.md b/lectures/inflation_history.md index afb36a85..5c5d0740 100644 --- a/lectures/inflation_history.md +++ b/lectures/inflation_history.md @@ -21,6 +21,7 @@ The `xlrd` package is used by `pandas` to perform operations on Excel files. ```{code-cell} ipython3 :tags: [hide-output] + !pip install xlrd ``` @@ -28,6 +29,7 @@ The `xlrd` package is used by `pandas` to perform operations on Excel files. ```{code-cell} ipython3 :tags: [hide-cell] + from importlib.metadata import version from packaging.version import Version @@ -100,16 +102,16 @@ mystnb: caption: Long run time series of the price level name: lrpl --- -df_fig5_bef1914 = df_fig5[df_fig5.index <= 1915] +df_fig5_befe1914 = df_fig5[df_fig5.index <= 1914] # Create plot cols = ['UK', 'US', 'France', 'Castile'] -fig, ax = plt.subplots(dpi=200) +fig, ax = plt.subplots(figsize=(10,6)) for col in cols: - ax.plot(df_fig5_bef1914.index, - df_fig5_bef1914[col], label=col, lw=2) + ax.plot(df_fig5_befe1914.index, + df_fig5_befe1914[col], label=col, lw=2) ax.legend() ax.set_ylabel('Index 1913 = 100') @@ -129,6 +131,10 @@ By staring at {numref}`lrpl` carefully, you might be able to guess when these te During these episodes, the gold/silver standard was temporarily abandoned when a government printed paper money to pay for war expenditures. +```{note} +This quantecon lecture {doc}`french_rev` describes circumstances leading up to and during the big inflation that occurred during the French Revolution. 
+``` + Despite these temporary lapses, a striking thing about the figure is that price levels were roughly constant over three centuries. In the early century, two other features of this data attracted the attention of [Irving Fisher](https://en.wikipedia.org/wiki/Irving_Fisher) of Yale University and [John Maynard Keynes](https://en.wikipedia.org/wiki/John_Maynard_Keynes) of Cambridge University. @@ -327,11 +333,6 @@ def pr_plot(p_seq, index, ax): # Calculate the difference of log p_seq log_diff_p = np.diff(np.log(p_seq)) - # Graph for the difference of log p_seq - ax.scatter(index[1:], log_diff_p, - label='Monthly inflation rate', - color='tab:grey') - # Calculate and plot moving average diff_smooth = pd.DataFrame(log_diff_p).rolling(3, center=True).mean() ax.plot(index[1:], diff_smooth, label='Moving average (3 period)', alpha=0.5, lw=2) @@ -345,7 +346,7 @@ def pr_plot(p_seq, index, ax): for label in ax.get_xticklabels(): label.set_rotation(45) - ax.legend(loc='upper left') + ax.legend() return ax ``` @@ -419,7 +420,7 @@ p_seq = df_aus['Retail price index, 52 commodities'] e_seq = df_aus['Exchange Rate'] lab = ['Retail price index', - '1/cents per Austrian Krone (Crown)'] + 'Austrian Krones (Crowns) per US cent'] # Create plot fig, ax = plt.subplots(dpi=200) @@ -463,12 +464,11 @@ mystnb: caption: Price index and exchange rate (Hungary) name: pi_xrate_hungary --- -m_seq = df_hun['Notes in circulation'] p_seq = df_hun['Hungarian index of prices'] e_seq = 1 / df_hun['Cents per crown in New York'] lab = ['Hungarian index of prices', - '1/cents per Hungarian Korona (Crown)'] + 'Hungarian Koronas (Crowns) per US cent'] # Create plot fig, ax = plt.subplots(dpi=200) @@ -537,7 +537,7 @@ e_seq[e_seq.index > '05-01-1924'] = np.nan ```{code-cell} ipython3 lab = ['Wholesale price index', - '1/cents per polish mark'] + 'Polish marks per US cent'] # Create plot fig, ax = plt.subplots(dpi=200) @@ -579,7 +579,7 @@ p_seq = df_deu['Price index (on basis of marks before July 1924,' e_seq = 1/df_deu['Cents per mark'] lab = ['Price index', - '1/cents per mark'] + 'Marks per US cent'] # Create plot fig, ax = plt.subplots(dpi=200) @@ -606,7 +606,7 @@ e_seq[e_seq.index > '12-01-1923'] = e_seq[e_seq.index > '12-01-1923'] * 1e12 lab = ['Price index (marks or converted to marks)', - '1/cents per mark (or reichsmark converted to mark)'] + 'Marks per US cent(or reichsmark converted to mark)'] # Create plot fig, ax = plt.subplots(dpi=200) @@ -653,7 +653,7 @@ The US government stood ready to convert a dollar into a specified amount of gol Immediately after World War I, Hungary, Austria, Poland, and Germany were not on the gold standard. -Their currencies were “fiat” or "unbacked", meaning that they were not backed by credible government promises to convert them into gold or silver coins on demand. +Their currencies were "fiat" or "unbacked", meaning that they were not backed by credible government promises to convert them into gold or silver coins on demand. The governments printed new paper notes to pay for goods and services. @@ -669,6 +669,6 @@ Chapter 3 of {cite}`sargent2002big` described deliberate changes in policy that Each government stopped printing money to pay for goods and services once again and made its currency convertible to the US dollar or the UK pound. -The story told in {cite}`sargent2002big` is grounded in a "monetarist theory of the price level" described in {doc}`cagan_ree` and {doc}`cagan_adaptive`. 
+The story told in {cite}`sargent2002big` is grounded in a *monetarist theory of the price level* described in {doc}`cagan_ree` and {doc}`cagan_adaptive`. Those lectures discuss theories about what owners of those rapidly depreciating currencies were thinking and how their beliefs shaped responses of inflation to government monetary and fiscal policies. diff --git a/lectures/input_output.md b/lectures/input_output.md index 8d6d649b..7c8170c2 100644 --- a/lectures/input_output.md +++ b/lectures/input_output.md @@ -120,7 +120,7 @@ A basic framework for their analysis is After introducing the input-output model, we describe some of its connections to {doc}`linear programming lecture `. -## Input output analysis +## Input-output analysis Let @@ -184,7 +184,7 @@ plt.text(1.6, -0.5, r'$d_{2}$') plt.show() ``` -**Feasible allocations must satisfy** +*Feasible allocations must satisfy* $$ \begin{aligned} @@ -263,8 +263,10 @@ $$ $$ +```{prf:example} +:label: io_ex_tg -For example a two good economy described by +For example a two-good economy described by $$ A = @@ -279,6 +281,7 @@ d = 2 \end{bmatrix} $$ (eq:inout_ex) +``` ```{code-cell} ipython3 A = np.array([[0.1, 40], @@ -336,6 +339,9 @@ $$ Equation {eq}`eq:inout_frontier` sweeps out a **production possibility frontier** of final consumption bundles $d$ that can be produced with exogenous labor input $x_0$. +```{prf:example} +:label: io_ex_ppf + Consider the example in {eq}`eq:inout_ex`. Suppose we are now given @@ -345,6 +351,7 @@ a_0^\top = \begin{bmatrix} 4 & 100 \end{bmatrix} $$ +``` Then we can find $A_0^\top$ by @@ -507,9 +514,9 @@ This illustrates that an element $l_{ij}$ of $L$ shows the total impact on secto ## Applications of graph theory -We can further study input output networks through applications of {doc}`graph theory `. +We can further study input-output networks through applications of {doc}`graph theory `. -An input output network can be represented by a weighted directed graph induced by the adjacency matrix $A$. +An input-output network can be represented by a weighted directed graph induced by the adjacency matrix $A$. The set of nodes $V = [n]$ is the list of sectors and the set of edges is given by @@ -550,7 +557,7 @@ The above figure indicates that manufacturing is the most dominant sector in the ### Output multipliers -Another way to rank sectors in input output networks is via output multipliers. +Another way to rank sectors in input-output networks is via output multipliers. The **output multiplier** of sector $j$ denoted by $\mu_j$ is usually defined as the total sector-wide impact of a unit change of demand in sector $j$. diff --git a/lectures/intro_supply_demand.md b/lectures/intro_supply_demand.md index baacb821..aea36eb5 100644 --- a/lectures/intro_supply_demand.md +++ b/lectures/intro_supply_demand.md @@ -33,7 +33,7 @@ Exports were regarded as good because they brought in bullion (gold flowed into Imports were regarded as bad because bullion was required to pay for them (gold flowed out). -This [zero-sum](https://en.wikipedia.org/wiki/Zero-sum_game) view of economics was eventually overturned by the work of the classical economists such as [Adam Smith](https://en.wikipedia.org/wiki/Adam_Smith) and [David Ricado](https://en.wikipedia.org/wiki/David_Ricardo), who showed how freeing domestic and international trade can enhance welfare. 
+This [zero-sum](https://en.wikipedia.org/wiki/Zero-sum_game) view of economics was eventually overturned by the work of the classical economists such as [Adam Smith](https://en.wikipedia.org/wiki/Adam_Smith) and [David Ricardo](https://en.wikipedia.org/wiki/David_Ricardo), who showed how freeing domestic and international trade can enhance welfare. There are many different expressions of this idea in economics. @@ -68,6 +68,9 @@ Before we look at the model of supply and demand, it will be helpful to have som ### A discrete example +```{prf:example} +:label: isd_ex_cs + Regarding consumer surplus, suppose that we have a single good and 10 consumers. These 10 consumers have different preferences; in particular, the amount they would be willing to pay for one unit of the good differs. @@ -79,6 +82,7 @@ Suppose that the willingness to pay for each of the 10 consumers is as follows: | willing to pay | 98 | 72 | 41 | 38 | 29 | 21 | 17 | 12 | 11 | 10 | (We have ordered consumers by willingness to pay, in descending order.) +``` If $p$ is the price of the good and $w_i$ is the amount that consumer $i$ is willing to pay, then $i$ buys when $w_i \geq p$. @@ -253,6 +257,9 @@ Let $v_i$ be the price at which producer $i$ is willing to sell the good. When the price is $p$, producer surplus for producer $i$ is $\max\{p - v_i, 0\}$. +```{prf:example} +:label: isd_ex_dc + For example, a producer willing to sell at \$10 and selling at price \$20 makes a surplus of \$10. Total producer surplus is given by @@ -273,6 +280,7 @@ p = 2 q^2 $$ The shaded area is the total producer surplus in this continuous model. +``` ```{code-cell} ipython3 --- @@ -351,7 +359,7 @@ Many of these rules relate to one of the most beautiful and powerful results in We will not try to cover these ideas here, partly because the subject is too big, and partly because you only need to know one rule for this lecture, stated below. -If $f(x) = c + \mathrm{d} x$, then +If $f(x) = c + dx$, then $$ \int_a^b f(x) \mathrm{d} x = c (b - a) + \frac{d}{2}(b^2 - a^2) @@ -670,7 +678,7 @@ This is the competitive equilibrium quantity. Observe that the equilibrium quantity equals the same $q$ given by equation {eq}`eq:old1`. The outcome that the quantity determined by equation {eq}`eq:old1` equates -supply to demand brings us a **key finding:** +supply to demand brings us a *key finding*: * a competitive equilibrium quantity maximizes our welfare criterion @@ -689,11 +697,11 @@ Our generalizations will extend the preceding analysis of a market for a single In addition -* we'll derive **demand curves** from a consumer problem that maximizes a - **utility function** subject to a **budget constraint**. +* we'll derive *demand curves* from a consumer problem that maximizes a + *utility function* subject to a *budget constraint*. -* we'll derive **supply curves** from the problem of a producer who is price - taker and maximizes his profits minus total costs that are described by a **cost function**. +* we'll derive *supply curves* from the problem of a producer who is price + taker and maximizes his profits minus total costs that are described by a *cost function*. 
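Before moving on to the exercises, here is a small editorial check (not part of the PR) of the single integration rule used above: for $f(x) = c + dx$, the closed form $c(b-a) + \tfrac{d}{2}(b^2 - a^2)$ agrees with a numerical integral. The import of `scipy.integrate.quad` and the particular numbers are illustrative assumptions.

```{code-cell} ipython3
from scipy.integrate import quad

c, d = 10, -2        # an illustrative linear function f(x) = c + d x
a, b = 0, 3

closed_form = c * (b - a) + (d / 2) * (b**2 - a**2)
numerical, _ = quad(lambda x: c + d * x, a, b)

print(closed_form, numerical)   # both equal 21.0 in this example
```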
## Exercises diff --git a/lectures/laffer_adaptive.md b/lectures/laffer_adaptive.md index fd7b7f37..684f2e6f 100644 --- a/lectures/laffer_adaptive.md +++ b/lectures/laffer_adaptive.md @@ -33,7 +33,7 @@ that we adopted in lectures {doc}`money_inflation` and lectures {doc}`money_infl We shall discover that changing our hypothesis about expectations formation in this way will change some our findings and leave others intact. In particular, we shall discover that * replacing rational expectations with adaptive expectations leaves the two stationary inflation rates unchanged, but that $\ldots$ -* it reverse the perverse dynamics by making the **lower** stationary inflation rate the one to which the system typically converges +* it reverses the perverse dynamics by making the **lower** stationary inflation rate the one to which the system typically converges * a more plausible comparative dynamic outcome emerges in which now inflation can be **reduced** by running **lower** government deficits These more plausible comparative dynamics underlie the "old time religion" that states that @@ -50,7 +50,7 @@ by dropping rational expectations and instead assuming that people form expecta {cite}`marcet2003recurrent` and {cite}`sargent2009conquest` extended that work and applied it to study recurrent high-inflation episodes in Latin America. ``` -## The Model +## The model Let @@ -88,9 +88,9 @@ $$ (eq:adaptex) where $\delta \in (0,1)$ -## Computing An Equilibrium Sequence +## Computing an equilibrium sequence -Equation the expressions for $m_{t+1}$ promided by {eq}`eq:ada_mdemand` and {eq}`eq:ada_msupply2` and use equation {eq}`eq:adaptex` to eliminate $\pi_t^*$ to obtain +Equation the expressions for $m_{t+1}$ provided by {eq}`eq:ada_mdemand` and {eq}`eq:ada_msupply2` and use equation {eq}`eq:adaptex` to eliminate $\pi_t^*$ to obtain the following equation for $p_t$: $$ @@ -99,7 +99,7 @@ $$ (eq:pequation) **Pseudo-code** -Here is pseudo code for our algorithm. +Here is the pseudo-code for our algorithm. Starting at time $0$ with initial conditions $(m_0, \pi_{-1}^*, p_{-1})$, for each $t \geq 0$ deploy the following steps in order: @@ -111,14 +111,14 @@ deploy the following steps in order: This completes the algorithm. -## Claims or Conjectures +## Claims or conjectures It will turn out that * if they exist, limiting values $\overline \pi$ and $\overline \mu$ will be equal -* if limiting values exists, there are two possible limiting values, one high, one low +* if limiting values exist, there are two possible limiting values, one high, one low * unlike the outcome in lecture {doc}`money_inflation_nonlinear`, for almost all initial log price levels and expected inflation rates $p_0, \pi_{t}^*$, the limiting $\overline \pi = \overline \mu$ is the **lower** steady state value @@ -128,7 +128,7 @@ It will turn out that * the preceding equation for $p_0$ comes from $m_1 - p_0 = - \alpha \bar \pi$ -## Limiting Values of Inflation Rate +## Limiting values of inflation rate As in our earlier lecture {doc}`money_inflation_nonlinear`, we can compute the two prospective limiting values for $\bar \pi$ by studying the steady-state Laffer curve. @@ -213,15 +213,15 @@ print(f'The two steady state of π are: {π_l, π_u}') We find two steady state $\bar \pi$ values -## Steady State Laffer Curve +## Steady-state Laffer curve -The following figure plots the steady state Laffer curve together with the two stationary inflation rates. 
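An editorial sketch (not part of the PR): the claim that there are exactly two stationary inflation rates can be checked directly by finding the two roots of the steady-state Laffer-curve condition $g = e^{-\alpha \bar\pi} - e^{-(1+\alpha)\bar\pi}$, the same expression that appears as `eq_g` later in this lecture. The parameter values `α = 5` and `g = 0.03`, the root brackets, and the use of `scipy.optimize.brentq` are illustrative assumptions, not values taken from the lecture.

```{code-cell} ipython3
import numpy as np
from scipy.optimize import brentq

α, g = 5, 0.03   # illustrative parameters only

# Steady-state seigniorage minus the deficit g, as a function of inflation π
h = lambda π: np.exp(-α * π) - np.exp(-(1 + α) * π) - g

# Seigniorage rises and then falls in π, so a deficit below the peak has two roots
π_peak = np.log((1 + α) / α)      # inflation rate at which seigniorage peaks
π_l = brentq(h, 1e-8, π_peak)     # low-inflation stationary rate
π_u = brentq(h, π_peak, 10.0)     # high-inflation stationary rate
print(π_l, π_u)
```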
+The following figure plots the steady-state Laffer curve together with the two stationary inflation rates. ```{code-cell} ipython3 --- mystnb: figure: - caption: Seigniorage as function of steady state inflation. The dashed brown lines + caption: Seigniorage as function of steady-state inflation. The dashed brown lines indicate $\pi_l$ and $\pi_u$. name: laffer_curve_adaptive width: 500px @@ -258,11 +258,11 @@ def plot_laffer(model, πs): plot_laffer(model, (π_l, π_u)) ``` -## Associated Initial Price Levels +## Associated initial price levels Now that we have our hands on the two possible steady states, we can compute two initial log price levels $p_{-1}$, which as initial conditions, imply that $\pi_t = \bar \pi $ for all $t \geq 0$. -In particular, to initiate a fixed point of the dynamic Laffer curve dynamics we set +In particular, to initiate a fixed point of the dynamic Laffer curve dynamics, we set $$ p_{-1} = m_0 + \alpha \pi^* @@ -348,7 +348,7 @@ eq_g = lambda x: np.exp(-model.α * x) - np.exp(-(1 + model.α) * x) print('eq_g == g:', np.isclose(eq_g(m_seq[-1] - m_seq[-2]), model.g)) ``` -## Slippery Side of Laffer Curve Dynamics +## Slippery side of Laffer curve dynamics We are now equipped to compute time series starting from different $p_{-1}, \pi_{-1}^*$ settings, analogous to those in this lecture {doc}`money_inflation` and this lecture {doc}`money_inflation_nonlinear`. diff --git a/lectures/lake_model.md b/lectures/lake_model.md index ba11f07f..f70da94f 100644 --- a/lectures/lake_model.md +++ b/lectures/lake_model.md @@ -36,7 +36,7 @@ The "flows" between the two lakes are as follows: 3. employed workers separate from their jobs at rate $\alpha$. 4. unemployed workers find jobs at rate $\lambda$. -The below graph illustrates the lake model. +The graph below illustrates the lake model. ```{figure} /_static/lecture_specific/lake_model/lake_model_worker.png :name: lake_model_graphviz @@ -216,7 +216,7 @@ Moreover, the times series of unemployment and employment seems to grow at some Since by intuition if we consider unemployment pool and employment pool as a closed system, the growth should be similar to the labor force. -We next ask whether the long run growth rates of $e_t$ and $u_t$ +We next ask whether the long-run growth rates of $e_t$ and $u_t$ also dominated by $1+b-d$ as labor force. The answer will be clearer if we appeal to {ref}`Perron-Frobenius theorem`. diff --git a/lectures/linear_equations.md b/lectures/linear_equations.md index dc4469b6..4b9ccb0f 100644 --- a/lectures/linear_equations.md +++ b/lectures/linear_equations.md @@ -141,10 +141,12 @@ column vectors. The set of all $n$-vectors is denoted by $\mathbb R^n$. -For example, +```{prf:example} +:label: le_ex_dim * $\mathbb R^2$ is the plane --- the set of pairs $(x_1, x_2)$. * $\mathbb R^3$ is 3 dimensional space --- the set of vectors $(x_1, x_2, x_3)$. +``` Often vectors are represented visually as arrows from the origin to the point. @@ -185,7 +187,8 @@ multiplication, which we now describe. When we add two vectors, we add them element-by-element. -For example, +```{prf:example} +:label: le_ex_add $$ \begin{bmatrix} @@ -208,6 +211,7 @@ $$ 1 \end{bmatrix}. $$ +``` In general, @@ -273,7 +277,8 @@ plt.show() Scalar multiplication is an operation that multiplies a vector $x$ with a scalar elementwise. -For example, +```{prf:example} +:label: le_ex_mul $$ -2 @@ -292,6 +297,7 @@ $$ 14 \end{bmatrix}. $$ +``` More generally, it takes a number $\gamma$ and a vector $x$ and produces @@ -429,7 +435,8 @@ matrices. 
Scalar multiplication and addition are generalizations of the vector case: -Here is an example of scalar multiplication +```{prf:example} +:label: le_ex_asm $$ 3 @@ -443,6 +450,7 @@ $$ 0 & 15 \end{bmatrix}. $$ +``` In general for a number $\gamma$ and any matrix $A$, @@ -461,6 +469,9 @@ $$ \end{bmatrix}. $$ +```{prf:example} +:label: le_ex_ma + Consider this example of matrix addition, $$ @@ -479,6 +490,7 @@ $$ 7 & 12 \end{bmatrix}. $$ +``` In general, @@ -518,6 +530,9 @@ $j$-th column of $B$. If $A$ is $n \times k$ and $B$ is $j \times m$, then to multiply $A$ and $B$ we require $k = j$, and the resulting matrix $A B$ is $n \times m$. +```{prf:example} +:label: le_ex_2dmul + Here's an example of a $2 \times 2$ matrix multiplied by a $2 \times 1$ vector. $$ @@ -536,6 +551,7 @@ Ax = a_{21}x_1 + a_{22}x_2 \end{bmatrix} $$ +``` As an important special case, consider multiplying $n \times k$ matrix $A$ and $k \times 1$ column vector $x$. @@ -839,6 +855,8 @@ In matrix form, the system {eq}`la_se` becomes \end{bmatrix}. ``` +```{prf:example} +:label: le_ex_gls For example, {eq}`n_eq_sys_la` has this form with $$ @@ -848,7 +866,7 @@ $$ \quad \text{and} \quad x = p. $$ - +``` When considering problems such as {eq}`la_gf`, we need to ask at least some of the following questions diff --git a/lectures/lln_clt.md b/lectures/lln_clt.md index 7e7676ce..83f21d43 100644 --- a/lectures/lln_clt.md +++ b/lectures/lln_clt.md @@ -51,6 +51,9 @@ will converge to their population means. Let's see an example of the LLN in action before we go further. +```{prf:example} +:label: lln_ex_ber + Consider a [Bernoulli random variable](https://en.wikipedia.org/wiki/Bernoulli_distribution) $X$ with parameter $p$. This means that $X$ takes values in $\{0,1\}$ and $\mathbb P\{X=1\} = p$. @@ -68,6 +71,7 @@ $$ \mathbb E X = 0 \cdot \mathbb P\{X=0\} + 1 \cdot \mathbb P\{X=1\} = \mathbb P\{X=1\} = p $$ +``` We can generate a draw of $X$ with `scipy.stats` (imported as `st`) as follows: @@ -167,6 +171,7 @@ $$ The next theorem is called Kolmogorov's strong law of large numbers. +(iid-theorem)= ````{prf:theorem} If $X_1, \ldots, X_n$ are IID and $\mathbb E |X|$ is finite, then @@ -368,7 +373,8 @@ The LLN fails to hold here because the assumption $\mathbb E|X| < \infty$ is vio The LLN can also fail to hold when the IID assumption is violated. -For example, suppose that +```{prf:example} +:label: lln_ex_fail $$ X_0 \sim N(0,1) @@ -383,6 +389,7 @@ $$ $$ Therefore, the distribution of $\bar X_n$ is $N(0,1)$ for all $n$! +``` Does this contradict the LLN, which says that the distribution of $\bar X_n$ collapses to the single point $\mu$? @@ -438,9 +445,9 @@ n \to \infty Here $\stackrel { d } {\to} N(0, \sigma^2)$ indicates [convergence in distribution](https://en.wikipedia.org/wiki/Convergence_of_random_variables#Convergence_in_distribution) to a centered (i.e., zero mean) normal with standard deviation $\sigma$. -The striking implication of the CLT is that for **any** distribution with +The striking implication of the CLT is that for any distribution with finite [second moment](https://en.wikipedia.org/wiki/Moment_(mathematics)), the simple operation of adding independent -copies **always** leads to a Gaussian(Normal) curve. +copies always leads to a Gaussian(Normal) curve. @@ -598,7 +605,7 @@ $$ $$ where $\alpha, \beta, \sigma$ are constants and $\epsilon_1, \epsilon_2, -\ldots$ is IID and standard norma. +\ldots$ are IID and standard normal. 
Suppose that diff --git a/lectures/lp_intro.md index 4e43965f..102ad4fd 100644 --- a/lectures/lp_intro.md +++ b/lectures/lp_intro.md @@ -32,15 +32,18 @@ Linear programs come in pairs: * an associated **dual** problem. -If a primal problem involves **maximization**, the dual problem involves **minimization**. +If a primal problem involves *maximization*, the dual problem involves *minimization*. -If a primal problem involves **minimization**, the dual problem involves **maximization**. +If a primal problem involves *minimization*, the dual problem involves *maximization*. We provide a standard form of a linear program and methods to transform other forms of linear programming problems into a standard form. We tell how to solve a linear programming problem using [SciPy](https://scipy.org/) and [Google OR-Tools](https://developers.google.com/optimization). -We describe the important concept of complementary slackness and how it relates to the dual problem. +```{seealso} +In another lecture, we will employ the linear programming method to solve the +{doc}`optimal transport problem `. +``` Let's start with some standard imports. @@ -56,7 +59,7 @@ Let's start with some examples of linear programming problem. -## Example 1: Production Problem +## Example 1: production problem This example was created by {cite}`bertsimas_tsitsiklis1997` @@ -78,7 +81,7 @@ Required per unit material and labor inputs and revenues are shown in table b A firm's problem is to construct a production plan that uses its 30 units of materials and 20 units of labor to maximize its revenue. -Let $x_i$ denote the quantity of Product $i$ that the firm produces. +Let $x_i$ denote the quantity of Product $i$ that the firm produces and $z$ denote the total revenue. This problem can be formulated as: @@ -93,6 +96,8 @@ $$ The following graph illustrates the firm's constraints and iso-revenue lines. +Iso-revenue lines show all the combinations of the two products that produce the same revenue. + ```{code-cell} ipython3 --- tags: [hide-input] @@ -126,19 +131,19 @@ plt.show() The blue region is the feasible set within which all constraints are satisfied. -Parallel orange lines are iso-revenue lines. +Parallel black lines are iso-revenue lines. -The firm's objective is to find the parallel orange lines to the upper boundary of the feasible set. +The firm's objective is to push the parallel black lines up to the upper boundary of the feasible set. -The intersection of the feasible set and the highest orange line delineates the optimal set. +The intersection of the feasible set and the highest black line delineates the optimal set. In this example, the optimal set is the point $(2.5, 5)$. -### Computation: Using OR-Tools +### Computation: using OR-Tools -Let's try to solve the same problem using the package *ortools.linear_solver* +Let's try to solve the same problem using the package `ortools.linear_solver`. @@ -149,7 +154,7 @@ The following cell instantiates a solver and creates two variables specifying th solver = pywraplp.Solver.CreateSolver('GLOP') ``` -Let's us create two variables $x_1$ and $x_2$ such that they can only have nonnegative values. +Let's create two variables $x_1$ and $x_2$ such that they can only have nonnegative values. ```{code-cell} ipython3 # Create the two variables and let them take on any non-negative value. @@ -174,7 +179,7 @@ Let's specify the objective function.
We use `solver.Maximize` method in the cas solver.Maximize(3 * x1 + 4 * x2) ``` -Once we solve the problem, we can check whether the solver was successful in solving the problem using it's status. If it's successful, then the status will be equal to `pywraplp.Solver.OPTIMAL`. +Once we solve the problem, we can check whether the solver was successful in solving the problem using its status. If it's successful, then the status will be equal to `pywraplp.Solver.OPTIMAL`. ```{code-cell} ipython3 # Solve the system. @@ -182,26 +187,24 @@ status = solver.Solve() if status == pywraplp.Solver.OPTIMAL: print('Objective value =', solver.Objective().Value()) - x1_sol = round(x1.solution_value(), 2) - x2_sol = round(x2.solution_value(), 2) - print(f'(x1, x2): ({x1_sol}, {x2_sol})') + print(f'(x1, x2): ({x1.solution_value():.2}, {x2.solution_value():.2})') else: print('The problem does not have an optimal solution.') ``` -## Example 2: Investment Problem +## Example 2: investment problem We now consider a problem posed and solved by {cite}`hu_guo2018`. -A mutual fund has $ \$ 100,000$ to be invested over a three year horizon. +A mutual fund has $ \$ 100,000$ to be invested over a three-year horizon. Three investment options are available: -1. **Annuity:** the fund can pay a same amount of new capital at the beginning of each of three years and receive a payoff of 130\% of **total capital** invested at the end of the third year. Once the mutual fund decides to invest in this annuity, it has to keep investing in all subsequent years in the three year horizon. +1. Annuity: the fund can pay a same amount of new capital at the beginning of each of three years and receive a payoff of 130\% of total capital invested at the end of the third year. Once the mutual fund decides to invest in this annuity, it has to keep investing in all subsequent years in the three year horizon. -2. **Bank account:** the fund can deposit any amount into a bank at the beginning of each year and receive its capital plus 6\% interest at the end of that year. In addition, the mutual fund is permitted to borrow no more than $20,000 at the beginning of each year and is asked to pay back the amount borrowed plus 6\% interest at the end of the year. The mutual fund can choose whether to deposit or borrow at the beginning of each year. +2. Bank account: the fund can deposit any amount into a bank at the beginning of each year and receive its capital plus 6\% interest at the end of that year. In addition, the mutual fund is permitted to borrow no more than $20,000 at the beginning of each year and is asked to pay back the amount borrowed plus 6\% interest at the end of the year. The mutual fund can choose whether to deposit or borrow at the beginning of each year. -3. **Corporate bond:** At the beginning of the second year, a corporate bond becomes available. +3. Corporate bond: At the beginning of the second year, a corporate bond becomes available. The fund can buy an amount that is no more than $ \$ $50,000 of this bond at the beginning of the second year and at the end of the third year receive a payout of 130\% of the amount invested in the bond. @@ -271,9 +274,9 @@ $$ -### Computation: Using OR-Tools +### Computation: using OR-Tools -Let's try to solve the above problem using the package *ortools.linear_solver*. +Let's try to solve the above problem using the package `ortools.linear_solver`. The following cell instantiates a solver and creates two variables specifying the range of values that they can have. 
@@ -282,7 +285,7 @@ The following cell instantiates a solver and creates two variables specifying th solver = pywraplp.Solver.CreateSolver('GLOP') ``` -Let's us create five variables $x_1, x_2, x_3, x_4,$ and $x_5$ such that they can only have the values defined in the above constraints. +Let's create five variables $x_1, x_2, x_3, x_4,$ and $x_5$ such that they can only have the values defined in the above constraints. ```{code-cell} ipython3 # Create the variables using the ranges available from constraints @@ -339,7 +342,7 @@ OR-Tools tells us that the best investment strategy is: 3. At the beginning of the third year, the bank balance should be $ \$75,072.245 $. -4. At the end of the third year, the mutual fund will get payouts from the annuity and corporate bond and repay its loan from the bank. At the end it will own $ \$141018.24 $, so that it's total net rate of return over the three periods is $ 41.02\%$. +4. At the end of the third year, the mutual fund will get payouts from the annuity and corporate bond and repay its loan from the bank. At the end it will own $ \$141,018.24 $, so that it's total net rate of return over the three periods is $ 41.02\%$. @@ -380,7 +383,7 @@ c = \begin{bmatrix} c_1 \\ c_2 \\ \vdots \\ c_n \\ \end{bmatrix}, \quad x = \begin{bmatrix} x_1 \\ x_2 \\ \vdots \\ x_n \\ \end{bmatrix}. \quad $$ -The standard form LP problem can be expressed concisely as: +The standard form linear programming problem can be expressed concisely as: $$ \begin{aligned} @@ -400,15 +403,15 @@ It is useful to know how to transform a problem that initially is not stated in By deploying the following steps, any linear programming problem can be transformed into an equivalent standard form linear programming problem. -1. **Objective Function:** If a problem is originally a constrained **maximization** problem, we can construct a new objective function that is the additive inverse of the original objective function. The transformed problem is then a **minimization** problem. +1. Objective function: If a problem is originally a constrained *maximization* problem, we can construct a new objective function that is the additive inverse of the original objective function. The transformed problem is then a *minimization* problem. -2. **Decision Variables:** Given a variable $x_j$ satisfying $x_j \le 0$, we can introduce a new variable $x_j' = - x_j$ and substitute it into original problem. Given a free variable $x_i$ with no restriction on its sign, we can introduce two new variables $x_j^+$ and $x_j^-$ satisfying $x_j^+, x_j^- \ge 0$ and replace $x_j$ by $x_j^+ - x_j^-$. +2. Decision variables: Given a variable $x_j$ satisfying $x_j \le 0$, we can introduce a new variable $x_j' = - x_j$ and substitute it into original problem. Given a free variable $x_i$ with no restriction on its sign, we can introduce two new variables $x_j^+$ and $x_j^-$ satisfying $x_j^+, x_j^- \ge 0$ and replace $x_j$ by $x_j^+ - x_j^-$. -3. **Inequality constraints:** Given an inequality constraint $\sum_{j=1}^n a_{ij}x_j \le 0$, we can introduce a new variable $s_i$, called a **slack variable** that satisfies $s_i \ge 0$ and replace the original constraint by $\sum_{j=1}^n a_{ij}x_j + s_i = 0$. +3. Inequality constraints: Given an inequality constraint $\sum_{j=1}^n a_{ij}x_j \le 0$, we can introduce a new variable $s_i$, called a **slack variable** that satisfies $s_i \ge 0$ and replace the original constraint by $\sum_{j=1}^n a_{ij}x_j + s_i = 0$. Let's apply the above steps to the two examples described above. 
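Before working through the two examples in detail, here is a minimal sketch of what steps 1-3 produce for the production problem of Example 1, solved with SciPy. The objective coefficients $(3, 4)$ match the OR-Tools cell shown earlier; the material constraint $2x_1 + 5x_2 \le 30$ and labor constraint $4x_1 + 2x_2 \le 20$ are assumptions recalled from that example rather than taken from this diff.

```python
# A sketch of steps 1-3 applied by hand to the production problem of Example 1.
# The constraint coefficients below are assumed from that example.
import numpy as np
from scipy.optimize import linprog

# Step 1: turn maximization into minimization by negating the objective.
# Step 3: add one slack variable per inequality constraint.
# Decision vector: (x1, x2, s1, s2), all nonnegative (the default bounds).
c = np.array([-3, -4, 0, 0])
A_eq = np.array([[2, 5, 1, 0],   # materials: 2*x1 + 5*x2 + s1 = 30
                 [4, 2, 0, 1]])  # labor:     4*x1 + 2*x2 + s2 = 20
b_eq = np.array([30, 20])

res = linprog(c, A_eq=A_eq, b_eq=b_eq)
print(res.x[:2])   # optimal plan, should be (2.5, 5.0)
print(-res.fun)    # maximized revenue, should be 27.5
print(res.x[2:])   # slack values, zero because both constraints bind
```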
-### Example 1: Production Problem +### Example 1: production problem The original problem is: @@ -434,9 +437,9 @@ $$ -### Computation: Using SciPy +### Computation: using SciPy -The package *scipy.optimize* provides a function ***linprog*** to solve linear programming problems with a form below: +The package `scipy.optimize` provides a function `linprog` to solve linear programming problems with a form below: $$ \begin{aligned} @@ -447,8 +450,10 @@ $$ \end{aligned} $$ +$A_{eq}, b_{eq}$ denote the equality constraint matrix and vector, and $A_{ub}, b_{ub}$ denote the inequality constraint matrix and vector. + ```{note} -By default $l = 0$ and $u = \text{None}$ unless explicitly specified with the argument 'bounds'. +By default $l = 0$ and $u = \text{None}$ unless explicitly specified with the argument `bounds`. ``` Let's now try to solve the Problem 1 using SciPy. @@ -480,7 +485,7 @@ else: The optimal plan tells the factory to produce $2.5$ units of Product 1 and $5$ units of Product 2; that generates a maximizing value of revenue of $27.5$. -We are using the *linprog* function as a **black box**. +We are using the `linprog` function as a *black box*. Inside it, Python first transforms the problem into standard form. @@ -491,12 +496,12 @@ Here the vector of slack variables is a two-dimensional NumPy array that equals See the [official documentation](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linprog.html#scipy.optimize.linprog) for more details. ```{note} -This problem is to maximize the objective, so that we need to put a minus sign in front of parameter vector c. +This problem is to maximize the objective, so that we need to put a minus sign in front of parameter vector $c$. ``` -### Example 2: Investment Problem +### Example 2: investment problem The original problem is: @@ -544,14 +549,14 @@ c_ex2 = np.array([1.30*3, 0, 0, 1.06, 1.30]) A_ex2 = np.array([[1, 1, 0, 0, 0], [1, -rate, 1, 0, 1], [1, 0, -rate, 1, 0]]) -b_ex2 = np.array([100000, 0, 0]) +b_ex2 = np.array([100_000, 0, 0]) # Bounds on decision variables bounds_ex2 = [( 0, None), - (-20000, None), - (-20000, None), - (-20000, None), - ( 0, 50000)] + (-20_000, None), + (-20_000, None), + (-20_000, None), + ( 0, 50_000)] ``` Let's solve the problem and check the status using `success` attribute. @@ -583,7 +588,7 @@ SciPy tells us that the best investment strategy is: 3. At the beginning of the third year, the mutual fund should borrow $ \$20,000$ from the bank and invest in the annuity. -4. At the end of the third year, the mutual fund will get payouts from the annuity and corporate bond and repay its loan from the bank. At the end it will own $ \$141018.24 $, so that it's total net rate of return over the three periods is $ 41.02\% $. +4. At the end of the third year, the mutual fund will get payouts from the annuity and corporate bond and repay its loan from the bank. At the end it will own $ \$141,018.24 $, so that it's total net rate of return over the three periods is $ 41.02\% $. @@ -702,7 +707,7 @@ $$ # Instantiate a GLOP(Google Linear Optimization Package) solver solver = pywraplp.Solver.CreateSolver('GLOP') ``` -Let's us create two variables $x_1$ and $x_2$ such that they can only have nonnegative values. +Let's create two variables $x_1$ and $x_2$ such that they can only have nonnegative values. ```{code-cell} ipython3 # Create the two variables and let them take on any non-negative value. 
diff --git a/lectures/markov_chains_I.md b/lectures/markov_chains_I.md index c50d472e..631dd70f 100644 --- a/lectures/markov_chains_I.md +++ b/lectures/markov_chains_I.md @@ -61,6 +61,8 @@ import matplotlib as mpl from mpl_toolkits.mplot3d import Axes3D from matplotlib.animation import FuncAnimation from IPython.display import HTML +from matplotlib.patches import Polygon +from mpl_toolkits.mplot3d.art3d import Poly3DCollection ``` ## Definitions and examples @@ -743,6 +745,11 @@ This is, in some sense, a steady state probability of unemployment. Not surprisingly it tends to zero as $\beta \to 0$, and to one as $\alpha \to 0$. + + + + + ### Calculating stationary distributions A stable algorithm for computing stationary distributions is implemented in [QuantEcon.py](http://quantecon.org/quantecon-py). @@ -757,6 +764,11 @@ mc = qe.MarkovChain(P) mc.stationary_distributions # Show all stationary distributions ``` + + + + + ### Asymptotic stationarity Consider an everywhere positive stochastic matrix with unique stationary distribution $\psi^*$. @@ -767,17 +779,24 @@ For example, we have the following result (strict_stationary)= ```{prf:theorem} +:label: mc_gs_thm + If there exists an integer $m$ such that all entries of $P^m$ are -strictly positive, with unique stationary distribution $\psi^*$, then +strictly positive, then $$ \psi_0 P^t \to \psi^* \quad \text{ as } t \to \infty $$ + +where $\psi^*$ is the unique stationary distribution. ``` +This situation is often referred to as **asymptotic stationarity** or **global stability**. + +A proof of the theorem can be found in Chapter 4 of {cite}`sargent2023economic`, as well as many other sources. + -See, for example, {cite}`sargent2023economic` Chapter 4. @@ -793,7 +812,7 @@ P = np.array([[0.971, 0.029, 0.000], P @ P ``` -Let's pick an initial distribution $\psi_0$ and trace out the sequence of distributions $\psi_0 P^t$ for $t = 0, 1, 2, \ldots$ +Let's pick an initial distribution $\psi_1, \psi_2, \psi_3$ and trace out the sequence of distributions $\psi_i P^t$ for $t = 0, 1, 2, \ldots$, for $i=1, 2, 3$. 
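As a quick sanity check of the theorem, here is a minimal sketch that iterates a single arbitrary starting distribution using the 3-state matrix $P$ shown just above, before tracing out the full sequences below.

```python
# Sketch: verify numerically that ψ_0 P^t approaches ψ* for the matrix P above.
# The starting distribution (0.3, 0.4, 0.3) is an arbitrary choice.
import numpy as np
import quantecon as qe

P = np.array([[0.971, 0.029, 0.000],
              [0.145, 0.778, 0.077],
              [0.000, 0.508, 0.492]])

ψ_0 = np.array([0.3, 0.4, 0.3])
ψ_star = qe.MarkovChain(P).stationary_distributions[0]

ψ_t = ψ_0 @ np.linalg.matrix_power(P, 100)   # ψ_0 P^t for t = 100
print(np.max(np.abs(ψ_t - ψ_star)))          # should be very close to zero
```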
First, we write a function to iterate the sequence of distributions for `ts_length` period @@ -810,26 +829,46 @@ def iterate_ψ(ψ_0, P, ts_length): Now we plot the sequence ```{code-cell} ipython3 -ψ_0 = (0.0, 0.2, 0.8) # Initial condition +:tags: [hide-input] + +ψ_1 = (0.0, 0.0, 1.0) +ψ_2 = (1.0, 0.0, 0.0) +ψ_3 = (0.0, 1.0, 0.0) # Three initial conditions +colors = ['blue','red', 'green'] # Different colors for each initial point + +# Define the vertices of the unit simplex +v = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]]) + +# Define the faces of the unit simplex +faces = [ + [v[0], v[1], v[2]], + [v[0], v[1], v[3]], + [v[0], v[2], v[3]], + [v[1], v[2], v[3]] +] fig = plt.figure() ax = fig.add_subplot(projection='3d') -def update(n): - ψ_t = iterate_ψ(ψ_0, P, n+1) - +def update(n): ax.clear() ax.set_xlim([0, 1]) ax.set_ylim([0, 1]) ax.set_zlim([0, 1]) - ax.view_init(30, 210) + ax.view_init(45, 45) - for i, point in enumerate(ψ_t): - ax.scatter(point[0], point[1], point[2], color='r', s=60, alpha=(i+1)/len(ψ_t)) + simplex = Poly3DCollection(faces, alpha=0.03) + ax.add_collection3d(simplex) + for idx, ψ_0 in enumerate([ψ_1, ψ_2, ψ_3]): + ψ_t = iterate_ψ(ψ_0, P, n+1) + + for i, point in enumerate(ψ_t): + ax.scatter(point[0], point[1], point[2], color=colors[idx], s=60, alpha=(i+1)/len(ψ_t)) + mc = qe.MarkovChain(P) ψ_star = mc.stationary_distributions[0] - ax.scatter(ψ_star[0], ψ_star[1], ψ_star[2], c='k', s=60) + ax.scatter(ψ_star[0], ψ_star[1], ψ_star[2], c='yellow', s=60) return fig, @@ -841,110 +880,105 @@ HTML(anim.to_jshtml()) Here * $P$ is the stochastic matrix for recession and growth {ref}`considered above `. -* The highest red dot is an arbitrarily chosen initial marginal probability distribution $\psi_0$, represented as a vector in $\mathbb R^3$. -* The other red dots are the marginal distributions $\psi_0 P^t$ for $t = 1, 2, \ldots$. -* The black dot is $\psi^*$. +* The red, blue and green dots are initial marginal probability distributions $\psi_1, \psi_2, \psi_3$, each of which is represented as a vector in $\mathbb R^3$. +* The transparent dots are the marginal distributions $\psi_i P^t$ for $t = 1, 2, \ldots$, for $i=1,2,3.$. +* The yellow dot is $\psi^*$. You might like to try experimenting with different initial conditions. -#### An alternative illustration - -We can show this in a slightly different way by focusing on the probability that $\psi_t$ puts on each state. 
-First, we write a function to draw initial distributions $\psi_0$ of size `num_distributions` - -```{code-cell} ipython3 -def generate_initial_values(num_distributions): - n = len(P) - - draws = np.random.randint(1, 10_000_000, size=(num_distributions,n)) - ψ_0s = draws/draws.sum(axis=1)[:, None] - return ψ_0s -``` +#### Example: failure of convergence -We then write a function to plot the dynamics of $(\psi_0 P^t)(i)$ as $t$ gets large, for each state $i$ with different initial distributions -```{code-cell} ipython3 -def plot_distribution(P, ts_length, num_distributions): +Consider the periodic chain with stochastic matrix - # Get parameters of transition matrix - n = len(P) - mc = qe.MarkovChain(P) - ψ_star = mc.stationary_distributions[0] +$$ +P = +\begin{bmatrix} + 0 & 1 \\ + 1 & 0 \\ +\end{bmatrix} +$$ - ## Draw the plot - fig, axes = plt.subplots(nrows=1, ncols=n, figsize=[11, 5]) - plt.subplots_adjust(wspace=0.35) +This matrix does not satisfy the conditions of +{ref}`strict_stationary` because, as you can readily check, - ψ_0s = generate_initial_values(num_distributions) +* $P^m = P$ when $m$ is odd and +* $P^m = I$, the identity matrix, when $m$ is even. - # Get the path for each starting value - for ψ_0 in ψ_0s: - ψ_t = iterate_ψ(ψ_0, P, ts_length) +Hence there is no $m$ such that all elements of $P^m$ are strictly positive. - # Obtain and plot distributions at each state - for i in range(n): - axes[i].plot(range(0, ts_length), ψ_t[:,i], alpha=0.3) +Moreover, we can see that global stability does not hold. - # Add labels - for i in range(n): - axes[i].axhline(ψ_star[i], linestyle='dashed', lw=2, color = 'black', - label = fr'$\psi^*({i})$') - axes[i].set_xlabel('t') - axes[i].set_ylabel(fr'$\psi_t({i})$') - axes[i].legend() +For instance, if we start at $\psi_0 = (1,0)$, then $\psi_m = \psi_0 P^m$ is $(1, 0)$ when $m$ is even and $(0,1)$ when $m$ is odd. - plt.show() -``` +We can see similar phenomena in higher dimensions. -The following figure shows +The next figure illustrates this for a periodic Markov chain with three states. ```{code-cell} ipython3 -# Define the number of iterations -# and initial distributions -ts_length = 50 -num_distributions = 25 - -P = np.array([[0.971, 0.029, 0.000], - [0.145, 0.778, 0.077], - [0.000, 0.508, 0.492]]) - -plot_distribution(P, ts_length, num_distributions) -``` - -The convergence to $\psi^*$ holds for different initial distributions. 
+:tags: [hide-input] +ψ_1 = (0.0, 0.0, 1.0) +ψ_2 = (0.5, 0.5, 0.0) +ψ_3 = (0.25, 0.25, 0.5) +ψ_4 = (1/3, 1/3, 1/3) +P = np.array([[0.0, 1.0, 0.0], + [0.0, 0.0, 1.0], + [1.0, 0.0, 0.0]]) -#### Example: failure of convergence +fig = plt.figure() +ax = fig.add_subplot(projection='3d') +colors = ['red','yellow', 'green', 'blue'] # Different colors for each initial point +# Define the vertices of the unit simplex +v = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]]) -In the case of a periodic chain, with +# Define the faces of the unit simplex +faces = [ + [v[0], v[1], v[2]], + [v[0], v[1], v[3]], + [v[0], v[2], v[3]], + [v[1], v[2], v[3]] +] -$$ -P = -\begin{bmatrix} - 0 & 1 \\ - 1 & 0 \\ -\end{bmatrix} -$$ +def update(n): + ax.clear() + ax.set_xlim([0, 1]) + ax.set_ylim([0, 1]) + ax.set_zlim([0, 1]) + ax.view_init(45, 45) + + # Plot the 3D unit simplex as planes + simplex = Poly3DCollection(faces,alpha=0.05) + ax.add_collection3d(simplex) + + for idx, ψ_0 in enumerate([ψ_1, ψ_2, ψ_3, ψ_4]): + ψ_t = iterate_ψ(ψ_0, P, n+1) + + point = ψ_t[-1] + ax.scatter(point[0], point[1], point[2], color=colors[idx], s=60) + points = np.array(ψ_t) + ax.plot(points[:, 0], points[:, 1], points[:, 2], color=colors[idx],linewidth=0.75) + + return fig, -we find the distribution oscillates +anim = FuncAnimation(fig, update, frames=range(20), blit=False, repeat=False) +plt.close() +HTML(anim.to_jshtml()) +``` +This animation demonstrates the behavior of an irreducible and periodic stochastic matrix. -```{code-cell} ipython3 -P = np.array([[0, 1], - [1, 0]]) +The red, yellow, and green dots represent different initial probability distributions. -ts_length = 20 -num_distributions = 30 +The blue dot represents the unique stationary distribution. -plot_distribution(P, ts_length, num_distributions) -``` +Unlike Hamilton’s Markov chain, these initial distributions do not converge to the unique stationary distribution. -Indeed, this $P$ fails our asymptotic stationarity condition, since, as you can -verify, $P^t$ is not everywhere positive for any $t$. +Instead, they cycle periodically around the probability simplex, illustrating that asymptotic stability fails. (finite_mc_expec)= @@ -1105,42 +1139,6 @@ mc = qe.MarkovChain(P) ψ_star ``` -Solution 3: - -We find the distribution $\psi$ converges to the stationary distribution more quickly compared to the {ref}`hamilton's chain `. - -```{code-cell} ipython3 -ts_length = 10 -num_distributions = 25 -plot_distribution(P, ts_length, num_distributions) -``` - -In fact, the rate of convergence is governed by {ref}`eigenvalues` {cite}`sargent2023economic`. - -```{code-cell} ipython3 -P_eigenvals = np.linalg.eigvals(P) -P_eigenvals -``` - -```{code-cell} ipython3 -P_hamilton = np.array([[0.971, 0.029, 0.000], - [0.145, 0.778, 0.077], - [0.000, 0.508, 0.492]]) - -hamilton_eigenvals = np.linalg.eigvals(P_hamilton) -hamilton_eigenvals -``` - -More specifically, it is governed by the spectral gap, the difference between the largest and the second largest eigenvalue. - -```{code-cell} ipython3 -sp_gap_P = P_eigenvals[0] - np.diff(P_eigenvals)[0] -sp_gap_hamilton = hamilton_eigenvals[0] - np.diff(hamilton_eigenvals)[0] - -sp_gap_P > sp_gap_hamilton -``` - -We will come back to this when we discuss {ref}`spectral theory`. 
```{solution-end} ``` diff --git a/lectures/markov_chains_II.md b/lectures/markov_chains_II.md index 59b8e23c..fe2ad9b3 100644 --- a/lectures/markov_chains_II.md +++ b/lectures/markov_chains_II.md @@ -71,6 +71,8 @@ that The stochastic matrix $P$ is called **irreducible** if all states communicate; that is, if $x$ and $y$ communicate for all $(x, y)$ in $S \times S$. +```{prf:example} +:label: mc2_ex_ir For example, consider the following transition probabilities for wealth of a fictitious set of households @@ -95,6 +97,7 @@ $$ It's clear from the graph that this stochastic matrix is irreducible: we can eventually reach any state from any other state. +``` We can also test this using [QuantEcon.py](http://quantecon.org/quantecon-py)'s MarkovChain class @@ -107,6 +110,9 @@ mc = qe.MarkovChain(P, ('poor', 'middle', 'rich')) mc.is_irreducible ``` +```{prf:example} +:label: mc2_ex_pf + Here's a more pessimistic scenario in which poor people remain poor forever ```{image} /_static/lecture_specific/markov_chains_II/Irre_2.png @@ -116,6 +122,7 @@ Here's a more pessimistic scenario in which poor people remain poor forever This stochastic matrix is not irreducible since, for example, rich is not accessible from poor. +``` Let's confirm this @@ -272,6 +279,9 @@ In any of these cases, ergodicity will hold. ### Example: a periodic chain +```{prf:example} +:label: mc2_ex_pc + Let's look at the following example with states 0 and 1: $$ @@ -291,7 +301,7 @@ The transition graph shows that this model is irreducible. ``` Notice that there is a periodic cycle --- the state cycles between the two states in a regular way. - +``` Not surprisingly, this property is called [periodicity](https://stats.libretexts.org/Bookshelves/Probability_Theory/Probability_Mathematical_Statistics_and_Stochastic_Processes_(Siegrist)/16%3A_Markov_Processes/16.05%3A_Periodicity_of_Discrete-Time_Chains). @@ -392,7 +402,7 @@ plt.show() ````{exercise} :label: mc_ex1 -Benhabib el al. {cite}`benhabib_wealth_2019` estimated that the transition matrix for social mobility as the following +Benhabib et al. {cite}`benhabib_wealth_2019` estimated that the transition matrix for social mobility as the following $$ P:= diff --git a/lectures/mle.md b/lectures/mle.md index ee00c399..8a15d6ac 100644 --- a/lectures/mle.md +++ b/lectures/mle.md @@ -39,6 +39,8 @@ $$ where $w$ is wealth. +```{prf:example} +:label: mle_ex_wt For example, if $a = 0.05$, $b = 0.1$, and $\bar w = 2.5$, this means @@ -46,7 +48,7 @@ For example, if $a = 0.05$, $b = 0.1$, and $\bar w = 2.5$, this means * a 10% tax on wealth in excess of 2.5. The unit is 100,000, so $w= 2.5$ means 250,000 dollars. - +``` Let's go ahead and define $h$: ```{code-cell} ipython3 @@ -242,7 +244,7 @@ num = (ln_sample - μ_hat)**2 σ_hat ``` -Let's plot the log-normal pdf using the estimated parameters against our sample data. +Let's plot the lognormal pdf using the estimated parameters against our sample data. ```{code-cell} ipython3 dist_lognorm = lognorm(σ_hat, scale = exp(μ_hat)) diff --git a/lectures/money_inflation.md b/lectures/money_inflation.md index 2b3d1721..216a9a63 100644 --- a/lectures/money_inflation.md +++ b/lectures/money_inflation.md @@ -35,7 +35,7 @@ Our model equates the demand for money to the supply at each time $t \geq 0$. Equality between those demands and supply gives a *dynamic* model in which money supply and price level *sequences* are simultaneously determined by a set of simultaneous linear equations. 
-These equations take the form of what are often called vector linear **difference equations**. +These equations take the form of what is often called vector linear **difference equations**. In this lecture, we'll roll up our sleeves and solve those equations in two different ways. @@ -49,19 +49,19 @@ In this lecture we will encounter these concepts from macroeconomics: * perverse dynamics under rational expectations in which the system converges to the higher stationary inflation tax rate * a peculiar comparative stationary-state outcome connected with that stationary inflation rate: it asserts that inflation can be *reduced* by running *higher* government deficits, i.e., by raising more resources by printing money. -The same qualitive outcomes prevail in this lecture {doc}`money_inflation_nonlinear` that studies a nonlinear version of the model in this lecture. +The same qualitative outcomes prevail in this lecture {doc}`money_inflation_nonlinear` that studies a nonlinear version of the model in this lecture. These outcomes set the stage for the analysis to be presented in this lecture {doc}`laffer_adaptive` that studies a nonlinear version of the present model; it assumes a version of "adaptive expectations" instead of rational expectations. That lecture will show that * replacing rational expectations with adaptive expectations leaves the two stationary inflation rates unchanged, but that $\ldots$ -* it reverse the pervese dynamics by making the *lower* stationary inflation rate the one to which the system typically converges +* it reverses the perverse dynamics by making the *lower* stationary inflation rate the one to which the system typically converges * a more plausible comparative dynamic outcome emerges in which now inflation can be *reduced* by running *lower* government deficits -This outcome will be used to justify a selection of a stationary inflation rate that underlies the analysis of unpleasant monetarist arithmetic to be studies in this lecture {doc}`unpleasant`. +This outcome will be used to justify a selection of a stationary inflation rate that underlies the analysis of unpleasant monetarist arithmetic to be studied in this lecture {doc}`unpleasant`. -We'll use theses tools from linear algebra: +We'll use these tools from linear algebra: * matrix multiplication * matrix inversion @@ -170,7 +170,7 @@ We shall describe two distinct but closely related ways of computing a pair $\ But first it is instructive to describe a special type of equilibrium known as a **steady state**. -In a steady state equilibrium, a subset of key variables remain constant or **invariant** over time, while remaining variables can be expressed as functions of those constant variables. +In a steady-state equilibrium, a subset of key variables remain constant or **invariant** over time, while remaining variables can be expressed as functions of those constant variables. Finding such state variables is something of an art. @@ -180,7 +180,7 @@ This is true in the present model. ### Steady states -In a **steady state** equilibrium of the model we are studying, +In a steady-state equilibrium of the model we are studying, $$ \begin{aligned} @@ -229,7 +229,7 @@ $$ R_t \in [\underline R, \overline R], \quad t \geq 0. 
$$ -Maximizing steady state seigniorage {eq}`eq:SSsigng` with respect to $\bar R$, we find that the maximizing rate of return on currency is +Maximizing steady-state seigniorage {eq}`eq:SSsigng` with respect to $\bar R$, we find that the maximizing rate of return on currency is $$ \bar R_{\rm max} = \sqrt{\frac{\gamma_2}{\gamma_1}} @@ -263,7 +263,7 @@ plt.rcParams['figure.dpi'] = 300 from collections import namedtuple ``` -Let's set some parameter values and compute possible steady state rates of return on currency $\bar R$, the seigniorage maximizing rate of return on currency, and an object that we'll discuss later, namely, an initial price level $p_0$ associated with the maximum steady state rate of return on currency. +Let's set some parameter values and compute possible steady-state rates of return on currency $\bar R$, the seigniorage maximizing rate of return on currency, and an object that we'll discuss later, namely, an initial price level $p_0$ associated with the maximum steady-state rate of return on currency. First, we create a `namedtuple` to store parameters so that we can reuse this `namedtuple` in our functions throughout this lecture @@ -337,7 +337,7 @@ plt.show() Let's print the two steady-state rates of return $\bar R$ and the associated seigniorage revenues that the government collects. -(By construction, both steady state rates of return should raise the same amounts real revenue.) +(By construction, both steady-state rates of return should raise the same amount of real revenue.) We hope that the following code will confirm this. @@ -349,7 +349,7 @@ g2 = seign(msm.R_l, msm) print(f'R_l, g_l = {msm.R_l:.4f}, {g2:.4f}') ``` -Now let's compute the maximum steady state amount of seigniorage that could be gathered by printing money and the state state rate of return on money that attains it. +Now let's compute the maximum steady-state amount of seigniorage that could be gathered by printing money and the steady-state rate of return on money that attains it. ## Two computation strategies @@ -434,7 +434,7 @@ As we shall see soon, selecting an initial $p_0$ in method 2 is intimately tied %b_0 = \gamma_1 - \gamma_0 R_0^{-1} %$$ -Remember that there exist two steady state equilibrium values $ R_\ell < R_u$ of the rate of return on currency $R_t$. +Remember that there exist two steady-state equilibrium values $ R_\ell < R_u$ of the rate of return on currency $R_t$. We proceed as follows. @@ -460,7 +460,7 @@ condition $R_0$. The quantity $1 - R_t$ can be interpreted as an **inflation tax rate** that the government imposes on holders of its currency. -We shall soon see that the existence of two steady state rates of return on currency +We shall soon see that the existence of two steady-state rates of return on currency that serve to finance the government deficit of $g$ indicates the presence of a **Laffer curve** in the inflation tax rate. ```{note} @@ -746,7 +746,7 @@ y^*_{t+1} = \Lambda^t y^*_t . $$ (eq:stardynamics) This equation represents the dynamics of our system in a way that lets us isolate the -force that causes gross inflation to converge to the inverse of the lower steady state rate +force that causes gross inflation to converge to the inverse of the lower steady-state rate of inflation $R_\ell$ that we discovered earlier.
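(A side note on computing the pair $R_\ell < R_u$ referred to here: the two rates can be located by root-finding on either side of $\bar R_{\rm max}$. The sketch below assumes a steady-state seigniorage function of the form $(1 - \bar R)(\gamma_1 - \gamma_2 \bar R^{-1})$, which is consistent with the maximizer $\sqrt{\gamma_2/\gamma_1}$ stated above, and uses hypothetical parameter values rather than those in the lecture's code.)

```python
# Sketch: locate R_l < R_u, the two steady-state rates of return that finance
# the same deficit g, by bracketing one root on each side of R_max.
# Both the functional form of seign() and the parameters are assumptions.
import numpy as np
from scipy.optimize import brentq

γ1, γ2, g = 100.0, 50.0, 3.0                 # hypothetical parameter values
seign = lambda R: (1 - R) * (γ1 - γ2 / R)    # assumed steady-state seigniorage
R_max = np.sqrt(γ2 / γ1)                     # seigniorage-maximizing rate

R_l = brentq(lambda R: seign(R) - g, γ2 / γ1 + 1e-6, R_max)  # left of the peak
R_u = brentq(lambda R: seign(R) - g, R_max, 1 - 1e-6)        # right of the peak

print(R_l, R_u)
print(np.isclose(seign(R_l), seign(R_u)))    # both rates raise revenue g
```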
Staring at equation {eq}`eq:stardynamics` indicates that unless @@ -950,7 +950,7 @@ Those dynamics are "perverse" not only in the sense that they imply that the mon ```{note} -The same qualitive outcomes prevail in this lecture {doc}`money_inflation_nonlinear` that studies a nonlinear version of the model in this lecture. +The same qualitative outcomes prevail in this lecture {doc}`money_inflation_nonlinear` that studies a nonlinear version of the model in this lecture. ``` diff --git a/lectures/money_inflation_nonlinear.md b/lectures/money_inflation_nonlinear.md index 7bd8306a..f716916f 100644 --- a/lectures/money_inflation_nonlinear.md +++ b/lectures/money_inflation_nonlinear.md @@ -35,17 +35,17 @@ As in that lecture, we discussed these topics: * an **inflation tax** that a government gathers by printing paper or electronic money * a dynamic **Laffer curve** in the inflation tax rate that has two stationary equilibria * perverse dynamics under rational expectations in which the system converges to the higher stationary inflation tax rate -* a peculiar comparative stationary-state analysis connected with that stationary inflation rate that assert that inflation can be *reduced* by running *higher* government deficits +* a peculiar comparative stationary-state analysis connected with that stationary inflation rate that asserts that inflation can be *reduced* by running *higher* government deficits These outcomes will set the stage for the analysis of {doc}`laffer_adaptive` that studies a version of the present model that uses a version of "adaptive expectations" instead of rational expectations. That lecture will show that * replacing rational expectations with adaptive expectations leaves the two stationary inflation rates unchanged, but that $\ldots$ -* it reverse the pervese dynamics by making the *lower* stationary inflation rate the one to which the system typically converges +* it reverses the perverse dynamics by making the *lower* stationary inflation rate the one to which the system typically converges * a more plausible comparative dynamic outcome emerges in which now inflation can be *reduced* by running *lower* government deficits -## The model +## The Model Let @@ -70,56 +70,9 @@ where $g$ is the part of government expenditures financed by printing money. **Remark:** Please notice that while equation {eq}`eq:mdemand` is linear in logs of the money supply and price level, equation {eq}`eq:msupply` is linear in levels. This will require adapting the equilibrium computation methods that we deployed in {doc}`money_inflation`. -## Computing an equilibrium sequence -We'll deploy a method similar to *Method 2* used in {doc}`money_inflation`. - -We'll take the time $t$ state vector to be $m_t, p_t$. - -* we'll treat $m_t$ as a ''natural state variable'' and $p_t$ as a ''jump'' variable. - -Let - -$$ -\lambda \equiv \frac{\alpha}{1+ \alpha} -$$ - -Let's rewrite equation {eq}`eq:mdemand`, respectively, as - -$$ -p_t = (1-\lambda) m_{t+1} + \lambda p_{t+1} -$$ (eq:mdemand2) - -We'll summarize our algorithm with the following pseudo-code. 
- -**Pseudo-code** - -* start for $m_0, p_0$ at time $t =0$ - -* solve {eq}`eq:msupply` for $m_{t+1}$ - -* solve {eq}`eq:mdemand2` for $p_{t+1} = \lambda^{-1} p_t + (1 - \lambda^{-1}) m_{t+1}$ - -* compute the inflation rate $\pi_t = p_{t+1} - p_t$ and growth of money supply $\mu_t = m_{t+1} - m_t $ -* iterate on $t$ to convergence of $\pi_t \rightarrow \overline \pi$ and $\mu_t \rightarrow \overline \mu$ - -It will turn out that - -* if they exist, limiting values $\overline \pi$ and $\overline \mu$ will be equal - -* if limiting values exist, there are two possible limiting values, one high, one low - -* for almost all initial log price levels $p_0$, the limiting $\overline \pi = \overline \mu$ is -the higher value - -* for each of the two possible limiting values $\overline \pi$ ,there is a unique initial log price level $p_0$ that implies that $\pi_t = \mu_t = \overline \mu$ for all $t \geq 0$ - - * this unique initial log price level solves $\log(\exp(m_0) + g \exp(p_0)) - p_0 = - \alpha \overline \pi $ - - * the preceding equation for $p_0$ comes from $m_1 - p_0 = - \alpha \overline \pi$ - -## Limiting values of inflation rate +## Limiting Values of Inflation Rate We can compute the two prospective limiting values for $\overline \pi$ by studying the steady-state Laffer curve. @@ -203,7 +156,7 @@ print(f'The two steady state of π are: {π_l, π_u}') We find two steady state $\overline \pi$ values. -## Steady state Laffer curve +## Steady State Laffer curve The following figure plots the steady state Laffer curve together with the two stationary inflation rates. @@ -247,9 +200,16 @@ def plot_laffer(model, πs): plot_laffer(model, (π_l, π_u)) ``` -## Associated initial price levels +## Initial Price Levels + +Now that we have our hands on the two possible steady states, we can compute two functions $\underline p(m_0)$ and +$\overline p(m_0)$, which as initial conditions for $p_0$ at time $0$, imply that $\pi_t = \overline \pi $ for all $t \geq 0$. + +The function $\underline p(m_0)$ will be associated with $\pi_l$, the lower steady-state inflation rate. + +The function $\overline p(m_0)$ will be associated with $\pi_u$, the higher steady-state inflation rate. + - -Now that we have our hands on the two possible steady states, we can compute two initial log price levels $p_0$, which as initial conditions, imply that $\pi_t = \overline \pi $ for all $t \geq 0$. ```{code-cell} ipython3 def solve_p0(p0, m0, α, g, π): @@ -312,7 +272,68 @@ eq_g = lambda x: np.exp(-model.α * x) - np.exp(-(1 + model.α) * x) print('eq_g == g:', np.isclose(eq_g(m_seq[-1] - m_seq[-2]), model.g)) ``` -## Slippery side of Laffer curve dynamics +## Computing an Equilibrium Sequence + +We'll deploy a method similar to *Method 2* used in {doc}`money_inflation`. + +We'll take the time $t$ state vector to be the pair $(m_t, p_t)$. + +We'll treat $m_t$ as a ``natural state variable`` and $p_t$ as a ``jump`` variable. + +Let + +$$ +\lambda \equiv \frac{\alpha}{1+ \alpha} +$$ + +Let's rewrite equation {eq}`eq:mdemand` as + +$$ +p_t = (1-\lambda) m_{t+1} + \lambda p_{t+1} +$$ (eq:mdemand2) + +We'll summarize our algorithm with the following pseudo-code. + +**Pseudo-code** + +The heart of the pseudo-code iterates on the following mapping from state vector $(m_t, p_t)$ at time $t$ +to state vector $(m_{t+1}, p_{t+1})$ at time $t+1$.
+ + +* starting from a given pair $(m_t, p_t)$ at time $t \geq 0$ + + * solve {eq}`eq:msupply` for $m_{t+1}$ + + * solve {eq}`eq:mdemand2` for $p_{t+1} = \lambda^{-1} p_t + (1 - \lambda^{-1}) m_{t+1}$ + + * compute the inflation rate $\pi_t = p_{t+1} - p_t$ and growth of money supply $\mu_t = m_{t+1} - m_t $ + +Next, compute the two functions $\underline p(m_0)$ and $\overline p(m_0)$ described above + +Now initiate the algorithm as follows. + + * set $m_0 >0$ + * set a value of $p_0 \in [\underline p(m_0), \overline p(m_0)]$ and form the pair $(m_0, p_0)$ at time $t =0$ + +Starting from $(m_0, p_0)$ iterate on $t$ to convergence of $\pi_t \rightarrow \overline \pi$ and $\mu_t \rightarrow \overline \mu$ + +It will turn out that + +* if they exist, limiting values $\overline \pi$ and $\overline \mu$ will be equal + +* if limiting values exist, there are two possible limiting values, one high, one low + +* for almost all initial log price levels $p_0$, the limiting $\overline \pi = \overline \mu$ is +the higher value + +* for each of the two possible limiting values $\overline \pi$ ,there is a unique initial log price level $p_0$ that implies that $\pi_t = \mu_t = \overline \mu$ for all $t \geq 0$ + + * this unique initial log price level solves $\log(\exp(m_0) + g \exp(p_0)) - p_0 = - \alpha \overline \pi $ + + * the preceding equation for $p_0$ comes from $m_1 - p_0 = - \alpha \overline \pi$ + + +## Slippery Side of Laffer Curve Dynamics We are now equipped to compute time series starting from different $p_0$ settings, like those in {doc}`money_inflation`. @@ -399,7 +420,7 @@ Those dynamics are "perverse" not only in the sense that they imply that the mon * the figure indicates that inflation can be *reduced* by running *higher* government deficits, i.e., by raising more resources through printing money. ```{note} -The same qualitive outcomes prevail in {doc}`money_inflation` that studies a linear version of the model in this lecture. +The same qualitative outcomes prevail in {doc}`money_inflation` that studies a linear version of the model in this lecture. ``` We discovered that diff --git a/lectures/monte_carlo.md b/lectures/monte_carlo.md index eecbd011..9f66c229 100644 --- a/lectures/monte_carlo.md +++ b/lectures/monte_carlo.md @@ -12,7 +12,7 @@ kernelspec: --- - +(monte-carlo)= # Monte Carlo and Option Pricing ## Overview @@ -49,7 +49,6 @@ from numpy.random import randn ``` - ## An introduction to Monte Carlo In this section we describe how Monte Carlo can be used to compute diff --git a/lectures/olg.md b/lectures/olg.md index 3fa56e38..6530e1be 100644 --- a/lectures/olg.md +++ b/lectures/olg.md @@ -155,7 +155,7 @@ The first-order condition for a maximum can be obtained by plugging $c_{t+1}$ into the objective function, taking the derivative with respect to $c_t$, and setting it to zero. -This leads to the **Euler equation** of the OLG model, which is +This leads to the **Euler equation** of the OLG model, which describes the optimal intertemporal consumption dynamics: ```{math} :label: euler_1_olg @@ -303,24 +303,6 @@ The next figure plots the supply of capital, as in [](saving_log_2_olg), as well (For the special case of log utility, supply does not depend on the interest rate, so we have a constant function.) 
-```{code-cell} ipython3 -R_vals = np.linspace(0.3, 1) -α, β = 0.5, 0.9 -w = 2.0 - -fig, ax = plt.subplots() - -ax.plot(R_vals, capital_demand(R_vals, α), - label="aggregate demand") -ax.plot(R_vals, capital_supply(R_vals, β, w), - label="aggregate supply") - -ax.set_xlabel("$R_{t+1}$") -ax.set_ylabel("$k_{t+1}$") -ax.legend() -plt.show() -``` - ## Equilibrium In this section we derive equilibrium conditions and investigate an example. @@ -409,15 +391,7 @@ ax.plot(R_vals, capital_supply(R_vals, β, w), R_e = equilibrium_R_log_utility(α, β, w) k_e = (β / (1 + β)) * w -ax.plot(R_e, k_e, 'go', ms=6, alpha=0.6) - -ax.annotate(r'equilibrium', - xy=(R_e, k_e), - xycoords='data', - xytext=(0, 60), - textcoords='offset points', - fontsize=12, - arrowprops=dict(arrowstyle="->")) +ax.plot(R_e, k_e, 'o',label='equilibrium') ax.set_xlabel("$R_{t+1}$") ax.set_ylabel("$k_{t+1}$") @@ -565,7 +539,7 @@ The interest rate reflects the marginal product of capital, which is high when c Previously, in our examples, we looked at the case of log utility. -Log utility is a rather special case. +Log utility is a rather special case of CRRA utility with $\gamma \to 1$. In this section, we are going to assume that $u(c) = \frac{ c^{1- \gamma}-1}{1-\gamma}$, where $\gamma >0, \gamma\neq 1$. diff --git a/lectures/prob_dist.md b/lectures/prob_dist.md index d8d04f7e..22defb18 100644 --- a/lectures/prob_dist.md +++ b/lectures/prob_dist.md @@ -4,14 +4,13 @@ jupytext: extension: .md format_name: myst format_version: 0.13 - jupytext_version: 1.14.5 + jupytext_version: 1.16.1 kernelspec: display_name: Python 3 (ipykernel) language: python name: python3 --- - # Distributions and Probabilities ```{index} single: Distributions and Probabilities @@ -23,6 +22,7 @@ In this lecture we give a quick introduction to data and probability distributio ```{code-cell} ipython3 :tags: [hide-output] + !pip install --upgrade yfinance ``` @@ -35,7 +35,6 @@ import scipy.stats import seaborn as sns ``` - ## Common distributions In this section we recall the definitions of some well-known distributions and explore how to manipulate them with SciPy. @@ -46,18 +45,22 @@ Let's start with discrete distributions. A discrete distribution is defined by a set of numbers $S = \{x_1, \ldots, x_n\}$ and a **probability mass function** (PMF) on $S$, which is a function $p$ from $S$ to $[0,1]$ with the property -$$ \sum_{i=1}^n p(x_i) = 1 $$ +$$ +\sum_{i=1}^n p(x_i) = 1 +$$ We say that a random variable $X$ **has distribution** $p$ if $X$ takes value $x_i$ with probability $p(x_i)$. That is, -$$ \mathbb P\{X = x_i\} = p(x_i) \quad \text{for } i= 1, \ldots, n $$ +$$ +\mathbb P\{X = x_i\} = p(x_i) \quad \text{for } i= 1, \ldots, n +$$ The **mean** or **expected value** of a random variable $X$ with distribution $p$ is $$ - \mathbb{E}[X] = \sum_{i=1}^n x_i p(x_i) +\mathbb{E}[X] = \sum_{i=1}^n x_i p(x_i) $$ Expectation is also called the *first moment* of the distribution. @@ -67,7 +70,7 @@ We also refer to this number as the mean of the distribution (represented by) $p The **variance** of $X$ is defined as $$ - \mathbb{V}[X] = \sum_{i=1}^n (x_i - \mathbb{E}[X])^2 p(x_i) +\mathbb{V}[X] = \sum_{i=1}^n (x_i - \mathbb{E}[X])^2 p(x_i) $$ Variance is also called the *second central moment* of the distribution. @@ -75,8 +78,8 @@ Variance is also called the *second central moment* of the distribution. 
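As a quick illustration of these two formulas, here is a minimal sketch that evaluates them directly for a made-up three-point distribution (not one of the named distributions discussed below):

```python
# Sketch: compute E[X] and V[X] from a PMF using the formulas above.
# The three-point distribution here is a hypothetical example.
import numpy as np

x = np.array([1, 2, 3])          # the set S
p = np.array([0.2, 0.5, 0.3])    # a PMF on S (sums to one)

mean = np.sum(x * p)                 # E[X] = sum of x_i * p(x_i)
var = np.sum((x - mean)**2 * p)      # V[X] = sum of (x_i - E[X])^2 * p(x_i)
print(mean, var)                     # 2.1 and 0.49
```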
The **cumulative distribution function** (CDF) of $X$ is defined by $$ - F(x) = \mathbb{P}\{X \leq x\} - = \sum_{i=1}^n \mathbb 1\{x_i \leq x\} p(x_i) +F(x) = \mathbb{P}\{X \leq x\} + = \sum_{i=1}^n \mathbb 1\{x_i \leq x\} p(x_i) $$ Here $\mathbb 1\{ \textrm{statement} \} = 1$ if "statement" is true and zero otherwise. @@ -95,7 +98,6 @@ n = 10 u = scipy.stats.randint(1, n+1) ``` - Here's the mean and variance: ```{code-cell} ipython3 @@ -115,7 +117,6 @@ u.pmf(1) u.pmf(2) ``` - Here's a plot of the probability mass function: ```{code-cell} ipython3 @@ -124,10 +125,11 @@ S = np.arange(1, n+1) ax.plot(S, u.pmf(S), linestyle='', marker='o', alpha=0.8, ms=4) ax.vlines(S, 0, u.pmf(S), lw=0.2) ax.set_xticks(S) +ax.set_xlabel('S') +ax.set_ylabel('PMF') plt.show() ``` - Here's a plot of the CDF: ```{code-cell} ipython3 @@ -136,13 +138,13 @@ S = np.arange(1, n+1) ax.step(S, u.cdf(S)) ax.vlines(S, 0, u.cdf(S), lw=0.2) ax.set_xticks(S) +ax.set_xlabel('S') +ax.set_ylabel('CDF') plt.show() ``` - The CDF jumps up by $p(x_i)$ at $x_i$. - ```{exercise} :label: prob_ex1 @@ -158,55 +160,56 @@ Check that your answers agree with `u.mean()` and `u.var()`. Another useful distribution is the Bernoulli distribution on $S = \{0,1\}$, which has PMF: $$ -p(x_i)= -\begin{cases} -p & \text{if $x_i = 1$}\\ -1-p & \text{if $x_i = 0$} -\end{cases} +p(i) = \theta^i (1 - \theta)^{1-i} +\qquad (i = 0, 1) $$ -Here $x_i \in S$ is the outcome of the random variable. +Here $\theta \in [0,1]$ is a parameter. + +We can think of this distribution as modeling probabilities for a random trial with success probability $\theta$. + +* $p(1) = \theta$ means that the trial succeeds (takes value 1) with probability $\theta$ +* $p(0) = 1 - \theta$ means that the trial fails (takes value 0) with + probability $1-\theta$ + +The formula for the mean is $\theta$, and the formula for the variance is $\theta(1-\theta)$. We can import the Bernoulli distribution on $S = \{0,1\}$ from SciPy like so: ```{code-cell} ipython3 -p = 0.4 -u = scipy.stats.bernoulli(p) +θ = 0.4 +u = scipy.stats.bernoulli(θ) ``` - -Here's the mean and variance: +Here's the mean and variance at $\theta=0.4$ ```{code-cell} ipython3 u.mean(), u.var() ``` -The formula for the mean is $p$, and the formula for the variance is $p(1-p)$. - - -Now let's evaluate the PMF: +We can evaluate the PMF as follows ```{code-cell} ipython3 -u.pmf(0) -u.pmf(1) +u.pmf(0), u.pmf(1) ``` - #### Binomial distribution Another useful (and more interesting) distribution is the **binomial distribution** on $S=\{0, \ldots, n\}$, which has PMF: $$ - p(i) = \binom{n}{i} \theta^i (1-\theta)^{n-i} +p(i) = \binom{n}{i} \theta^i (1-\theta)^{n-i} $$ -Here $\theta \in [0,1]$ is a parameter. +Again, $\theta \in [0,1]$ is a parameter. The interpretation of $p(i)$ is: the probability of $i$ successes in $n$ independent trials with success probability $\theta$. For example, if $\theta=0.5$, then $p(i)$ is the probability of $i$ heads in $n$ flips of a fair coin. -The mean and variance are: +The formula for the mean is $n \theta$ and the formula for the variance is $n \theta (1-\theta)$. + +Let's investigate an example ```{code-cell} ipython3 n = 10 @@ -214,11 +217,17 @@ n = 10 u = scipy.stats.binom(n, θ) ``` +According to our formulas, the mean and variance are + ```{code-cell} ipython3 -u.mean(), u.var() +n * θ, n * θ * (1 - θ) ``` -The formula for the mean is $n \theta$ and the formula for the variance is $n \theta (1-\theta)$. 
+Let's see if SciPy gives us the same results: + +```{code-cell} ipython3 +u.mean(), u.var() +``` Here's the PMF: @@ -232,10 +241,11 @@ S = np.arange(1, n+1) ax.plot(S, u.pmf(S), linestyle='', marker='o', alpha=0.8, ms=4) ax.vlines(S, 0, u.pmf(S), lw=0.2) ax.set_xticks(S) +ax.set_xlabel('S') +ax.set_ylabel('PMF') plt.show() ``` - Here's the CDF: ```{code-cell} ipython3 @@ -244,10 +254,11 @@ S = np.arange(1, n+1) ax.step(S, u.cdf(S)) ax.vlines(S, 0, u.cdf(S), lw=0.2) ax.set_xticks(S) +ax.set_xlabel('S') +ax.set_ylabel('CDF') plt.show() ``` - ```{exercise} :label: prob_ex3 @@ -267,6 +278,8 @@ u_sum = np.cumsum(u.pmf(S)) ax.step(S, u_sum) ax.vlines(S, 0, u_sum, lw=0.2) ax.set_xticks(S) +ax.set_xlabel('S') +ax.set_ylabel('CDF') plt.show() ``` @@ -275,35 +288,68 @@ We can see that the output graph is the same as the one above. ```{solution-end} ``` -#### Poisson distribution +#### Geometric distribution -Poisson distribution on $S = \{0, 1, \ldots\}$ with parameter $\lambda > 0$ has PMF +The geometric distribution has infinite support $S = \{1, 2, \ldots\}$ and its PMF is given by $$ - p(i) = \frac{\lambda^i}{i!} e^{-\lambda} +p(i) = (1 - \theta)^{i-1} \theta $$ -The interpretation of $p(i)$ is: the probability of $i$ events in a fixed time interval, where the events occur at a constant rate $\lambda$ and independently of each other. +where $\theta \in (0,1]$ is a parameter + +(A discrete distribution has infinite support if the set of points to which it assigns positive probability is infinite.) + +To understand the distribution, think of repeated independent random trials, each with success probability $\theta$. + +The interpretation of $p(i)$ is: the probability that the first success occurs on trial $i$, i.e., after $i-1$ failures. + +It can be shown that the mean of the distribution is $1/\theta$ and the variance is $(1-\theta)/\theta^2$. + +Here's an example. -The mean and variance are: ```{code-cell} ipython3 -λ = 2 -u = scipy.stats.poisson(λ) +θ = 0.1 +u = scipy.stats.geom(θ) +u.mean(), u.var() ``` - + +Here's part of the PMF: + ```{code-cell} ipython3 -u.mean(), u.var() +fig, ax = plt.subplots() +n = 20 +S = np.arange(n) +ax.plot(S, u.pmf(S), linestyle='', marker='o', alpha=0.8, ms=4) +ax.vlines(S, 0, u.pmf(S), lw=0.2) +ax.set_xticks(S) +ax.set_xlabel('S') +ax.set_ylabel('PMF') +plt.show() ``` -The the expectation of Poisson distribution is $\lambda$ and the variance is also $\lambda$. +#### Poisson distribution -Here's the PMF: +The Poisson distribution on $S = \{0, 1, \ldots\}$ with parameter $\lambda > 0$ has PMF + +$$ +p(i) = \frac{\lambda^i}{i!} e^{-\lambda} +$$ + +The interpretation of $p(i)$ is: the probability of $i$ events in a fixed time interval, where the events occur independently at a constant rate $\lambda$. + +It can be shown that the mean is $\lambda$ and the variance is also $\lambda$. + +Here's an example.
```{code-cell} ipython3 λ = 2 u = scipy.stats.poisson(λ) +u.mean(), u.var() ``` +Here's the PMF: + ```{code-cell} ipython3 u.pmf(1) ``` @@ -314,21 +360,24 @@ S = np.arange(1, n+1) ax.plot(S, u.pmf(S), linestyle='', marker='o', alpha=0.8, ms=4) ax.vlines(S, 0, u.pmf(S), lw=0.2) ax.set_xticks(S) +ax.set_xlabel('S') +ax.set_ylabel('PMF') plt.show() ``` - ### Continuous distributions -Continuous distributions are represented by a **probability density function**, which is a function $p$ over $\mathbb R$ (the set of all real numbers) such that $p(x) \geq 0$ for all $x$ and +A continuous distribution is represented by a **probability density function**, which is a function $p$ over $\mathbb R$ (the set of all real numbers) such that $p(x) \geq 0$ for all $x$ and -$$ \int_{-\infty}^\infty p(x) dx = 1 $$ +$$ +\int_{-\infty}^\infty p(x) dx = 1 +$$ We say that random variable $X$ has distribution $p$ if $$ - \mathbb P\{a < X < b\} = \int_a^b p(x) dx +\mathbb P\{a < X < b\} = \int_a^b p(x) dx $$ for all $a \leq b$. @@ -338,14 +387,14 @@ The definition of the mean and variance of a random variable $X$ with distributi For example, the mean of $X$ is $$ - \mathbb{E}[X] = \int_{-\infty}^\infty x p(x) dx +\mathbb{E}[X] = \int_{-\infty}^\infty x p(x) dx $$ The **cumulative distribution function** (CDF) of $X$ is defined by $$ - F(x) = \mathbb P\{X \leq x\} - = \int_{-\infty}^x p(x) dx +F(x) = \mathbb P\{X \leq x\} + = \int_{-\infty}^x p(x) dx $$ @@ -354,15 +403,15 @@ $$ Perhaps the most famous distribution is the **normal distribution**, which has density $$ - p(x) = \frac{1}{\sqrt{2\pi}\sigma} - \exp\left(-\frac{(x-\mu)^2}{2\sigma^2}\right) +p(x) = \frac{1}{\sqrt{2\pi}\sigma} + \exp\left(-\frac{(x-\mu)^2}{2\sigma^2}\right) $$ -This distribution has two parameters, $\mu$ and $\sigma$. +This distribution has two parameters, $\mu \in \mathbb R$ and $\sigma \in (0, \infty)$. -It can be shown that, for this distribution, the mean is $\mu$ and the variance is $\sigma^2$. +Using calculus, it can be shown that, for this distribution, the mean is $\mu$ and the variance is $\sigma^2$. -We can obtain the moments, PDF and CDF of the normal density as follows: +We can obtain the moments, PDF and CDF of the normal density via SciPy as follows: ```{code-cell} ipython3 μ, σ = 0.0, 1.0 @@ -386,12 +435,12 @@ for μ, σ in zip(μ_vals, σ_vals): ax.plot(x_grid, u.pdf(x_grid), alpha=0.5, lw=2, label=f'$\mu={μ}, \sigma={σ}$') - +ax.set_xlabel('x') +ax.set_ylabel('PDF') plt.legend() plt.show() ``` - Here's a plot of the CDF: ```{code-cell} ipython3 @@ -402,27 +451,29 @@ for μ, σ in zip(μ_vals, σ_vals): alpha=0.5, lw=2, label=f'$\mu={μ}, \sigma={σ}$') ax.set_ylim(0, 1) +ax.set_xlabel('x') +ax.set_ylabel('CDF') plt.legend() plt.show() ``` - #### Lognormal distribution The **lognormal distribution** is a distribution on $\left(0, \infty\right)$ with density $$ - p(x) = \frac{1}{\sigma x \sqrt{2\pi}} - \exp \left(- \frac{\left(\log x - \mu\right)^2}{2 \sigma^2} \right) +p(x) = \frac{1}{\sigma x \sqrt{2\pi}} + \exp \left(- \frac{\left(\log x - \mu\right)^2}{2 \sigma^2} \right) $$ This distribution has two parameters, $\mu$ and $\sigma$. It can be shown that, for this distribution, the mean is $\exp\left(\mu + \sigma^2/2\right)$ and the variance is $\left[\exp\left(\sigma^2\right) - 1\right] \exp\left(2\mu + \sigma^2\right)$. -It has a nice interpretation: if $X$ is lognormally distributed, then $\log X$ is normally distributed. 
+It can be proved that -It is often used to model variables that are "multiplicative" in nature, such as income or asset prices. +* if $X$ is lognormally distributed, then $\log X$ is normally distributed, and +* if $X$ is normally distributed, then $\exp X$ is lognormally distributed. We can obtain the moments, PDF, and CDF of the lognormal density as follows: @@ -446,7 +497,8 @@ for μ, σ in zip(μ_vals, σ_vals): ax.plot(x_grid, u.pdf(x_grid), alpha=0.5, lw=2, label=f'$\mu={μ}, \sigma={σ}$') - +ax.set_xlabel('x') +ax.set_ylabel('PDF') plt.legend() plt.show() ``` @@ -461,21 +513,24 @@ for σ in σ_vals: label=f'$\mu={μ}, \sigma={σ}$') ax.set_ylim(0, 1) ax.set_xlim(0, 3) +ax.set_xlabel('x') +ax.set_ylabel('CDF') plt.legend() plt.show() ``` #### Exponential distribution -The **exponential distribution** is a distribution on $\left(0, \infty\right)$ with density +The **exponential distribution** is a distribution supported on $\left(0, \infty\right)$ with density $$ - p(x) = \lambda \exp \left( - \lambda x \right) +p(x) = \lambda \exp \left( - \lambda x \right) +\qquad (x > 0) $$ -This distribution has one parameter, $\lambda$. +This distribution has one parameter $\lambda$. -It is related to the Poisson distribution as it describes the distribution of the length of the time interval between two consecutive events in a Poisson process. +The exponential distribution can be thought of as the continuous analog of the geometric distribution. It can be shown that, for this distribution, the mean is $1/\lambda$ and the variance is $1/\lambda^2$. @@ -500,6 +555,8 @@ for λ in λ_vals: ax.plot(x_grid, u.pdf(x_grid), alpha=0.5, lw=2, label=f'$\lambda={λ}$') +ax.set_xlabel('x') +ax.set_ylabel('PDF') plt.legend() plt.show() ``` @@ -512,6 +569,8 @@ for λ in λ_vals: alpha=0.5, lw=2, label=f'$\lambda={λ}$') ax.set_ylim(0, 1) +ax.set_xlabel('x') +ax.set_ylabel('CDF') plt.legend() plt.show() ``` @@ -521,8 +580,8 @@ plt.show() The **beta distribution** is a distribution on $(0, 1)$ with density $$ - p(x) = \frac{\Gamma(\alpha + \beta)}{\Gamma(\alpha) \Gamma(\beta)} - x^{\alpha - 1} (1 - x)^{\beta - 1} +p(x) = \frac{\Gamma(\alpha + \beta)}{\Gamma(\alpha) \Gamma(\beta)} + x^{\alpha - 1} (1 - x)^{\beta - 1} $$ where $\Gamma$ is the [gamma function](https://en.wikipedia.org/wiki/Gamma_function). @@ -557,6 +616,8 @@ for α, β in zip(α_vals, β_vals): ax.plot(x_grid, u.pdf(x_grid), alpha=0.5, lw=2, label=fr'$\alpha={α}, \beta={β}$') +ax.set_xlabel('x') +ax.set_ylabel('PDF') plt.legend() plt.show() ``` @@ -569,18 +630,19 @@ for α, β in zip(α_vals, β_vals): alpha=0.5, lw=2, label=fr'$\alpha={α}, \beta={β}$') ax.set_ylim(0, 1) +ax.set_xlabel('x') +ax.set_ylabel('CDF') plt.legend() plt.show() ``` - #### Gamma distribution The **gamma distribution** is a distribution on $\left(0, \infty\right)$ with density $$ - p(x) = \frac{\beta^\alpha}{\Gamma(\alpha)} - x^{\alpha - 1} \exp(-\beta x) +p(x) = \frac{\beta^\alpha}{\Gamma(\alpha)} + x^{\alpha - 1} \exp(-\beta x) $$ This distribution has two parameters, $\alpha > 0$ and $\beta > 0$. 
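For this parameterization, in which $\beta$ acts as a rate parameter, the mean is $\alpha/\beta$ and the variance is $\alpha/\beta^2$. Here is a brief sketch checking those expressions against SciPy, whose gamma distribution takes a shape parameter and a scale parameter equal to $1/\beta$; the values $\alpha = 3$ and $\beta = 2$ are illustrative rather than taken from the lecture:

```python
import scipy.stats

α, β = 3.0, 2.0                       # illustrative shape and rate values
u = scipy.stats.gamma(α, scale=1/β)   # SciPy uses shape and scale = 1/rate

# Compare SciPy's moments with the closed forms α/β and α/β²
print(u.mean(), α / β)                # 1.5   1.5
print(u.var(), α / β**2)              # 0.75  0.75
```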
@@ -614,6 +676,8 @@ for α, β in zip(α_vals, β_vals): ax.plot(x_grid, u.pdf(x_grid), alpha=0.5, lw=2, label=fr'$\alpha={α}, \beta={β}$') +ax.set_xlabel('x') +ax.set_ylabel('PDF') plt.legend() plt.show() ``` @@ -626,6 +690,8 @@ for α, β in zip(α_vals, β_vals): alpha=0.5, lw=2, label=fr'$\alpha={α}, \beta={β}$') ax.set_ylim(0, 1) +ax.set_xlabel('x') +ax.set_ylabel('CDF') plt.legend() plt.show() ``` @@ -653,7 +719,6 @@ df = pd.DataFrame(data, columns=['name', 'income']) df ``` - In this situation, we might refer to the set of their incomes as the "income distribution." The terminology is confusing because this set is not a probability distribution @@ -672,32 +737,33 @@ Suppose we have an observed distribution with values $\{x_1, \ldots, x_n\}$ The **sample mean** of this distribution is defined as $$ - \bar x = \frac{1}{n} \sum_{i=1}^n x_i +\bar x = \frac{1}{n} \sum_{i=1}^n x_i $$ The **sample variance** is defined as $$ - \frac{1}{n} \sum_{i=1}^n (x_i - \bar x)^2 +\frac{1}{n} \sum_{i=1}^n (x_i - \bar x)^2 $$ For the income distribution given above, we can calculate these numbers via ```{code-cell} ipython3 -x = np.asarray(df['income']) -``` - -```{code-cell} ipython3 +x = df['income'] x.mean(), x.var() ``` - ```{exercise} :label: prob_ex4 -Check that the formulas given above produce the same numbers. +If you try to check that the formulas given above for the sample mean and sample +variance produce the same numbers, you will see that the variance isn't quite +right. This is because SciPy uses $1/(n-1)$ instead of $1/n$ as the term at the +front of the variance. (Some books define the sample variance this way.) +Confirm. ``` + ### Visualization Let's look at different ways that we can visualize one or more observed distributions. @@ -708,26 +774,22 @@ We will cover - kernel density estimates and - violin plots -+++ {"user_expressions": []} #### Histograms -+++ {"user_expressions": []} - We can histogram the income distribution we just constructed as follows ```{code-cell} ipython3 -x = df['income'] fig, ax = plt.subplots() ax.hist(x, bins=5, density=True, histtype='bar') +ax.set_xlabel('income') +ax.set_ylabel('density') plt.show() ``` -+++ {"user_expressions": []} - Let's look at a distribution from real data. -In particular, we will look at the monthly return on Amazon shares between 2000/1/1 and 2023/1/1. +In particular, we will look at the monthly return on Amazon shares between 2000/1/1 and 2024/1/1. The monthly return is calculated as the percent change in the share price over each month. @@ -735,45 +797,43 @@ So we will have one observation for each month. ```{code-cell} ipython3 :tags: [hide-output] -df = yf.download('AMZN', '2000-1-1', '2023-1-1', interval='1mo' ) + +df = yf.download('AMZN', '2000-1-1', '2024-1-1', interval='1mo') prices = df['Adj Close'] -data = prices.pct_change()[1:] * 100 -data.head() +x_amazon = prices.pct_change()[1:] * 100 +x_amazon.head() ``` -+++ {"user_expressions": []} - The first observation is the monthly return (percent change) over January 2000, which was ```{code-cell} ipython3 -data[0] +x_amazon.iloc[0] ``` -+++ {"user_expressions": []} - Let's turn the return observations into an array and histogram it. 
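Before doing so, here is a small sketch of what `pct_change` computes, namely the percent change relative to the previous observation; the price series below is made up and is not the downloaded data.

```python
import pandas as pd

# A toy price series standing in for the downloaded monthly prices
prices = pd.Series([100.0, 110.0, 99.0, 105.6])

returns = prices.pct_change()[1:] * 100             # as in the lecture's code
manual = (prices / prices.shift(1) - 1)[1:] * 100   # the same calculation written out

print(returns.tolist())   # approximately [10.0, -10.0, 6.67]
print(manual.tolist())
```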
-```{code-cell} ipython3 -x_amazon = np.asarray(data) -``` - ```{code-cell} ipython3 fig, ax = plt.subplots() ax.hist(x_amazon, bins=20) +ax.set_xlabel('monthly return (percent change)') +ax.set_ylabel('density') plt.show() ``` -+++ {"user_expressions": []} - #### Kernel density estimates -Kernel density estimate (KDE) is a non-parametric way to estimate and visualize the PDF of a distribution. +Kernel density estimates (KDE) provide a simple way to estimate and visualize the density of a distribution. + +If you are not familiar with KDEs, you can think of them as a smoothed +histogram. -KDE will generate a smooth curve that approximates the PDF. +Let's have a look at a KDE formed from the Amazon return data. ```{code-cell} ipython3 fig, ax = plt.subplots() sns.kdeplot(x_amazon, ax=ax) +ax.set_xlabel('monthly return (percent change)') +ax.set_ylabel('KDE') plt.show() ``` @@ -784,6 +844,8 @@ fig, ax = plt.subplots() sns.kdeplot(x_amazon, ax=ax, bw_adjust=0.1, alpha=0.5, label="bw=0.1") sns.kdeplot(x_amazon, ax=ax, bw_adjust=0.5, alpha=0.5, label="bw=0.5") sns.kdeplot(x_amazon, ax=ax, bw_adjust=1, alpha=0.5, label="bw=1") +ax.set_xlabel('monthly return (percent change)') +ax.set_ylabel('KDE') plt.legend() plt.show() ``` @@ -795,51 +857,53 @@ A suitable bandwidth is not too smooth (underfitting) or too wiggly (overfitting #### Violin plots -+++ {"user_expressions": []} -Yet another way to display an observed distribution is via a violin plot. +Another way to display an observed distribution is via a violin plot. ```{code-cell} ipython3 fig, ax = plt.subplots() ax.violinplot(x_amazon) +ax.set_ylabel('monthly return (percent change)') +ax.set_xlabel('KDE') plt.show() ``` -+++ {"user_expressions": []} - Violin plots are particularly useful when we want to compare different distributions. -For example, let's compare the monthly returns on Amazon shares with the monthly return on Apple shares. +For example, let's compare the monthly returns on Amazon shares with the monthly return on Costco shares. ```{code-cell} ipython3 :tags: [hide-output] -df = yf.download('AAPL', '2000-1-1', '2023-1-1', interval='1mo' ) + +df = yf.download('COST', '2000-1-1', '2024-1-1', interval='1mo') prices = df['Adj Close'] -data = prices.pct_change()[1:] * 100 -x_apple = np.asarray(data) +x_costco = prices.pct_change()[1:] * 100 ``` ```{code-cell} ipython3 fig, ax = plt.subplots() -ax.violinplot([x_amazon, x_apple]) +ax.violinplot([x_amazon, x_costco]) +ax.set_ylabel('monthly return (percent change)') +ax.set_xlabel('retailers') + +ax.set_xticks([1, 2]) +ax.set_xticklabels(['Amazon', 'Costco']) plt.show() ``` -+++ {"user_expressions": []} - ### Connection to probability distributions -+++ {"user_expressions": []} - Let's discuss the connection between observed distributions and probability distributions. Sometimes it's helpful to imagine that an observed distribution is generated by a particular probability distribution. For example, we might look at the returns from Amazon above and imagine that they were generated by a normal distribution. -Even though this is not true, it might be a helpful way to think about the data. +(Even though this is not true, it *might* be a helpful way to think about the data.) -Here we match a normal distribution to the Amazon monthly returns by setting the sample mean to the mean of the normal distribution and the sample variance equal to the variance. 
+Here we match a normal distribution to the Amazon monthly returns by setting the +sample mean to the mean of the normal distribution and the sample variance equal +to the variance. Then we plot the density and the histogram. @@ -855,17 +919,15 @@ x_grid = np.linspace(-50, 65, 200) fig, ax = plt.subplots() ax.plot(x_grid, u.pdf(x_grid)) ax.hist(x_amazon, density=True, bins=40) +ax.set_xlabel('monthly return (percent change)') +ax.set_ylabel('density') plt.show() ``` -+++ {"user_expressions": []} - -The match between the histogram and the density is not very bad but also not very good. +The match between the histogram and the density is not bad but also not very good. One reason is that the normal distribution is not really a good fit for this observed data --- we will discuss this point again when we talk about {ref}`heavy tailed distributions`. -+++ {"user_expressions": []} - Of course, if the data really *is* generated by the normal distribution, then the fit will be better. Let's see this in action @@ -882,12 +944,11 @@ x_grid = np.linspace(-4, 4, 200) fig, ax = plt.subplots() ax.plot(x_grid, u.pdf(x_grid)) ax.hist(x_draws, density=True, bins=40) +ax.set_xlabel('x') +ax.set_ylabel('density') plt.show() ``` -+++ {"user_expressions": []} - Note that if you keep increasing $N$, which is the number of observations, the fit will get better and better. This convergence is a version of the "law of large numbers", which we will discuss {ref}`later`. - diff --git a/lectures/scalar_dynam.md b/lectures/scalar_dynam.md index ab05d2a9..5926a3f6 100644 --- a/lectures/scalar_dynam.md +++ b/lectures/scalar_dynam.md @@ -220,11 +220,12 @@ For example, in a later lecture {doc}`solow`, we will study the Solow-Swan growt ```{math} :label: solow_lom2 -k_{t+1} = s z k_t^{\alpha} + (1 - \delta) k_t +k_{t+1} = s A k_t^{\alpha} + (1 - \delta) k_t ``` -Here $k$ is the per capita capital stock and $s, z, \alpha, \delta$ are positive -parameters with $0 < \alpha, \delta < 1$. +Here $k=K/L$ is the per capita capital stock, $s$ is the saving rate, $A$ is the total factor productivity, $\alpha$ is the capital share, and $\delta$ is the depreciation rate. + +All these parameter are positive and $0 < \alpha, \delta < 1$. If you try to iterate like we did in {eq}`sdslinmodpath`, you will find that the algebra gets messy quickly. @@ -293,12 +294,17 @@ $$ Obviously every globally stable steady state is also locally stable. -We will see examples below where the converse is not true. - +Here is an example where the converse is not true. +```{prf:example} +Consider the self-map $g$ on $\mathbb{R}$ defined by $g(x)=x^2$. The fixed point $1$ is not stable. +For example, $g^t (x)\to\infty$ for any $x>1$. +However, $0$ is locally stable, because $-1 0$. + with $a, b, \rho > 0$. + +Here, $\alpha$ is the output elasticity of capital and $\rho$ is a parameter that determines the elasticity of substitution between capital and labor. We assume a closed economy, so aggregate domestic investment equals aggregate domestic saving. 
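Returning to the Solow-Swan law of motion {eq}`solow_lom2` above: although iterating it algebraically gets messy, iterating it numerically is straightforward. A minimal sketch with illustrative parameter values (not taken from the lectures), which also checks the iterates against the analytical steady state $k^* = (sA/\delta)^{1/(1-\alpha)}$:

```python
# Iterate k_{t+1} = s A k_t**α + (1 - δ) k_t and compare the limit with
# the analytical steady state k* = (s A / δ)**(1 / (1 - α)).
s, A, α, δ = 0.3, 2.0, 0.3, 0.4    # illustrative values only
k = 1.0                            # initial per capita capital stock

for t in range(200):
    k = s * A * k**α + (1 - δ) * k

k_star = (s * A / δ) ** (1 / (1 - α))
print(k, k_star)                   # the iterates converge to k*
```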
@@ -81,6 +83,7 @@ Setting $k_t := K_t / L$ and using homogeneity of degree one now yields $$ k_{t+1} + = s \frac{F(K_t, L)}{L} + (1 - \delta) \frac{K_t}{L} = s \frac{F(K_t, L)}{L} + (1 - \delta) k_t = s F(k_t, 1) + (1 - \delta) k_t $$ diff --git a/lectures/time_series_with_matrices.md b/lectures/time_series_with_matrices.md index c809de9c..e73566ce 100644 --- a/lectures/time_series_with_matrices.md +++ b/lectures/time_series_with_matrices.md @@ -3,14 +3,16 @@ jupytext: text_representation: extension: .md format_name: myst + format_version: 0.13 + jupytext_version: 1.16.1 kernelspec: - display_name: Python 3 + display_name: Python 3 (ipykernel) language: python name: python3 --- (time_series_with_matrices)= -```{raw} html +```{raw} jupyter
QuantEcon @@ -26,13 +28,12 @@ This lecture uses matrices to solve some linear difference equations. As a running example, we’ll study a **second-order linear difference equation** that was the key technical tool in Paul Samuelson’s 1939 -article {cite}`Samuelson1939` that introduced the **multiplier-accelerator** model. +article {cite}`Samuelson1939` that introduced the *multiplier-accelerator model*. This model became the workhorse that powered early econometric versions of Keynesian macroeconomic models in the United States. -You can read about the details of that model in [this](https://python.quantecon.org/samuelson.html) -QuantEcon lecture. +You can read about the details of that model in {doc}`intermediate:samuelson`. (That lecture also describes some technicalities about second-order linear difference equations.) @@ -44,11 +45,16 @@ a "forward-looking" linear difference equation. We will use the following imports: -```{code-cell} ipython +```{code-cell} ipython3 import numpy as np import matplotlib.pyplot as plt from matplotlib import cm -plt.rcParams["figure.figsize"] = (11, 5) #set default figure size + +# Custom figsize for this lecture +plt.rcParams["figure.figsize"] = (11, 5) + +# Set decimal printing to 3 decimal places +np.set_printoptions(precision=3, suppress=True) ``` ## Samuelson's model @@ -64,20 +70,20 @@ y_{t} = \alpha_{0} + \alpha_{1} y_{t-1} + \alpha_{2} y_{t-2} ``` where we assume that $y_0$ and $y_{-1}$ are given numbers -that we take as **initial conditions**. +that we take as *initial conditions*. In Samuelson's model, $y_t$ stood for **national income** or perhaps a different measure of aggregate activity called **gross domestic product** (GDP) at time $t$. -Equation {eq}`tswm_1` is called a **second-order linear difference equation**. +Equation {eq}`tswm_1` is called a *second-order linear difference equation*. It is called second order because it depends on two lags. But actually, it is a collection of $T$ simultaneous linear equations in the $T$ variables $y_1, y_2, \ldots, y_T$. ```{note} To be able to solve a second-order linear difference -equation, we require two **boundary conditions** that can take the form -either of two **initial conditions** or two **terminal conditions** or +equation, we require two *boundary conditions* that can take the form +either of two *initial conditions*, two *terminal conditions* or possibly one of each. ``` @@ -131,42 +137,42 @@ The vector $y$ is a complete time path $\{y_t\}_{t=1}^T$. Let’s put Python to work on an example that captures the flavor of Samuelson’s multiplier-accelerator model. -We'll set parameters equal to the same values we used in [this QuantEcon lecture](https://python.quantecon.org/samuelson.html). +We'll set parameters equal to the same values we used in {doc}`intermediate:samuelson`. -```{code-cell} python3 +```{code-cell} ipython3 T = 80 # parameters -𝛼0 = 10.0 -𝛼1 = 1.53 -𝛼2 = -.9 +α_0 = 10.0 +α_1 = 1.53 +α_2 = -.9 -y_1 = 28. # y_{-1} -y0 = 24. +y_neg1 = 28.0 # y_{-1} +y_0 = 24.0 ``` Now we construct $A$ and $b$. -```{code-cell} python3 +```{code-cell} ipython3 A = np.identity(T) # The T x T identity matrix for i in range(T): if i-1 >= 0: - A[i, i-1] = -𝛼1 + A[i, i-1] = -α_1 if i-2 >= 0: - A[i, i-2] = -𝛼2 + A[i, i-2] = -α_2 -b = np.full(T, 𝛼0) -b[0] = 𝛼0 + 𝛼1 * y0 + 𝛼2 * y_1 -b[1] = 𝛼0 + 𝛼2 * y0 +b = np.full(T, α_0) +b[0] = α_0 + α_1 * y_0 + α_2 * y_neg1 +b[1] = α_0 + α_2 * y_0 ``` Let’s look at the matrix $A$ and the vector $b$ for our example. 
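As an aside before printing them: the same banded matrix can be built without an explicit loop, which can be convenient for large $T$. The sketch below (using the lecture's parameter values) confirms that the two constructions coincide; `A_alt` is a name introduced here, not one used in the lecture.

```python
import numpy as np

T = 80
α_1, α_2 = 1.53, -0.9

# Identity plus the two subdiagonals, built with np.eye offsets
A_alt = np.identity(T) - α_1 * np.eye(T, k=-1) - α_2 * np.eye(T, k=-2)

# Rebuild A with the loop-based construction from the lecture and compare
A = np.identity(T)
for i in range(T):
    if i - 1 >= 0:
        A[i, i-1] = -α_1
    if i - 2 >= 0:
        A[i, i-2] = -α_2

print(np.allclose(A, A_alt))   # True
```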
-```{code-cell} python3 +```{code-cell} ipython3 A, b ``` @@ -175,28 +181,34 @@ Now let’s solve for the path of $y$. If $y_t$ is GNP at time $t$, then we have a version of Samuelson’s model of the dynamics for GNP. -To solve $y = A^{-1} b$ we can either invert $A$ directly, as in +To solve $y = A^{-1} b$ we can either invert $A$ directly, as in -```{code-cell} python3 +```{code-cell} ipython3 A_inv = np.linalg.inv(A) y = A_inv @ b ``` -or we can use `np.linalg.solve`: +or we can use `np.linalg.solve`: - -```{code-cell} python3 +```{code-cell} ipython3 y_second_method = np.linalg.solve(A, b) ``` Here make sure the two methods give the same result, at least up to floating point precision: -```{code-cell} python3 +```{code-cell} ipython3 np.allclose(y, y_second_method) ``` +$A$ is invertible as it is lower triangular and [its diagonal entries are non-zero](https://www.statlect.com/matrix-algebra/triangular-matrix) + +```{code-cell} ipython3 +# Check if A is lower triangular +np.allclose(A, np.tril(A)) +``` + ```{note} In general, `np.linalg.solve` is more numerically stable than using `np.linalg.inv` directly. @@ -207,7 +219,7 @@ it directly. Now we can plot. -```{code-cell} python3 +```{code-cell} ipython3 plt.plot(np.arange(T)+1, y) plt.xlabel('t') plt.ylabel('y') @@ -215,7 +227,7 @@ plt.ylabel('y') plt.show() ``` -The **steady state** value $y^*$ of $y_t$ is obtained by setting $y_t = y_{t-1} = +The {ref}`*steady state*` value $y^*$ of $y_t$ is obtained by setting $y_t = y_{t-1} = y_{t-2} = y^*$ in {eq}`tswm_1`, which yields $$ @@ -225,21 +237,21 @@ $$ If we set the initial values to $y_{0} = y_{-1} = y^*$, then $y_{t}$ will be constant: -```{code-cell} python3 -y_star = 𝛼0 / (1 - 𝛼1 - 𝛼2) -y_1_steady = y_star # y_{-1} -y0_steady = y_star +```{code-cell} ipython3 +y_star = α_0 / (1 - α_1 - α_2) +y_neg1_steady = y_star # y_{-1} +y_0_steady = y_star -b_steady = np.full(T, 𝛼0) -b_steady[0] = 𝛼0 + 𝛼1 * y0_steady + 𝛼2 * y_1_steady -b_steady[1] = 𝛼0 + 𝛼2 * y0_steady +b_steady = np.full(T, α_0) +b_steady[0] = α_0 + α_1 * y_0_steady + α_2 * y_neg1_steady +b_steady[1] = α_0 + α_2 * y_0_steady ``` -```{code-cell} python3 +```{code-cell} ipython3 y_steady = A_inv @ b_steady ``` -```{code-cell} python3 +```{code-cell} ipython3 plt.plot(np.arange(T)+1, y_steady) plt.xlabel('t') plt.ylabel('y') @@ -250,7 +262,7 @@ plt.show() ## Adding a random term To generate some excitement, we'll follow in the spirit of the great economists -Eugen Slutsky and Ragnar Frisch and replace our original second-order difference +[Eugen Slutsky](https://en.wikipedia.org/wiki/Eugen_Slutsky) and [Ragnar Frisch](https://en.wikipedia.org/wiki/Ragnar_Frisch) and replace our original second-order difference equation with the following **second-order stochastic linear difference equation**: @@ -260,8 +272,8 @@ equation**: y_{t} = \alpha_{0} + \alpha_{1} y_{t-1} + \alpha_{2} y_{t-2} + u_t ``` -where $u_{t} \sim N\left(0, \sigma_{u}^{2}\right)$ and is IID, -meaning **independent** and **identically** distributed. +where $u_{t} \sim N\left(0, \sigma_{u}^{2}\right)$ and is {ref}`IID `, +meaning independent and identically distributed. We’ll stack these $T$ equations into a system cast in terms of matrix algebra. @@ -292,16 +304,13 @@ $$ (eq:eqma) Let’s try it out in Python. -```{code-cell} python3 -𝜎u = 2. -``` - -```{code-cell} python3 -u = np.random.normal(0, 𝜎u, size=T) +```{code-cell} ipython3 +σ_u = 2. 
+u = np.random.normal(0, σ_u, size=T) y = A_inv @ (b + u) ``` -```{code-cell} python3 +```{code-cell} ipython3 plt.plot(np.arange(T)+1, y) plt.xlabel('t') plt.ylabel('y') @@ -314,12 +323,12 @@ number of advanced countries in recent decades. We can simulate $N$ paths. -```{code-cell} python3 +```{code-cell} ipython3 N = 100 for i in range(N): col = cm.viridis(np.random.rand()) # Choose a random color from viridis - u = np.random.normal(0, 𝜎u, size=T) + u = np.random.normal(0, σ_u, size=T) y = A_inv @ (b + u) plt.plot(np.arange(T)+1, y, lw=0.5, color=col) @@ -332,12 +341,12 @@ plt.show() Also consider the case when $y_{0}$ and $y_{-1}$ are at steady state. -```{code-cell} python3 +```{code-cell} ipython3 N = 100 for i in range(N): col = cm.viridis(np.random.rand()) # Choose a random color from viridis - u = np.random.normal(0, 𝜎u, size=T) + u = np.random.normal(0, σ_u, size=T) y_steady = A_inv @ (b_steady + u) plt.plot(np.arange(T)+1, y_steady, lw=0.5, color=col) @@ -347,8 +356,6 @@ plt.ylabel('y') plt.show() ``` - - ## Computing population moments @@ -389,44 +396,48 @@ $$ Let's write a Python class that computes the mean vector $\mu_y$ and covariance matrix $\Sigma_y$. - - ```{code-cell} ipython3 class population_moments: """ - Compute population moments mu_y, Sigma_y. + Compute population moments μ_y, Σ_y. --------- Parameters: - alpha0, alpha1, alpha2, T, y_1, y0 + α_0, α_1, α_2, T, y_neg1, y_0 """ - def __init__(self, alpha0, alpha1, alpha2, T, y_1, y0, sigma_u): + def __init__(self, α_0=10.0, + α_1=1.53, + α_2=-.9, + T=80, + y_neg1=28.0, + y_0=24.0, + σ_u=1): # compute A A = np.identity(T) for i in range(T): if i-1 >= 0: - A[i, i-1] = -alpha1 + A[i, i-1] = -α_1 if i-2 >= 0: - A[i, i-2] = -alpha2 + A[i, i-2] = -α_2 # compute b - b = np.full(T, alpha0) - b[0] = alpha0 + alpha1 * y0 + alpha2 * y_1 - b[1] = alpha0 + alpha2 * y0 + b = np.full(T, α_0) + b[0] = α_0 + α_1 * y_0 + α_2 * y_neg1 + b[1] = α_0 + α_2 * y_0 # compute A inverse A_inv = np.linalg.inv(A) - self.A, self.b, self.A_inv, self.sigma_u, self.T = A, b, A_inv, sigma_u, T + self.A, self.b, self.A_inv, self.σ_u, self.T = A, b, A_inv, σ_u, T def sample_y(self, n): """ Give a sample of size n of y. """ - A_inv, sigma_u, b, T = self.A_inv, self.sigma_u, self.b, self.T - us = np.random.normal(0, sigma_u, size=[n, T]) + A_inv, σ_u, b, T = self.A_inv, self.σ_u, self.b, self.T + us = np.random.normal(0, σ_u, size=[n, T]) ys = np.vstack([A_inv @ (b + u) for u in us]) return ys @@ -435,20 +446,19 @@ class population_moments: """ Compute the population moments of y. """ - A_inv, sigma_u, b = self.A_inv, self.sigma_u, self.b + A_inv, σ_u, b = self.A_inv, self.σ_u, self.b - # compute mu_y - self.mu_y = A_inv @ b - self.Sigma_y = sigma_u**2 * (A_inv @ A_inv.T) + # compute μ_y + self.μ_y = A_inv @ b + self.Σ_y = σ_u**2 * (A_inv @ A_inv.T) + + return self.μ_y, self.Σ_y - return self.mu_y, self.Sigma_y - -my_process = population_moments( - alpha0=10.0, alpha1=1.53, alpha2=-.9, T=80, y_1=28., y0=24., sigma_u=1) +series_process = population_moments() -mu_y, Sigma_y = my_process.get_moments() -A_inv = my_process.A_inv +μ_y, Σ_y = series_process.get_moments() +A_inv = series_process.A_inv ``` It is enlightening to study the $\mu_y, \Sigma_y$'s implied by various parameter values. @@ -458,14 +468,14 @@ Among other things, we can use the class to exhibit how **statistical stationar Let's begin by generating $N$ time realizations of $y$ plotting them together with population mean $\mu_y$ . 
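Before plotting, here is a quick numerical check of these population moment formulas: the sample mean and sample covariance of many simulated paths should be close to $A^{-1} b$ and $\sigma_u^2 A^{-1} (A^{-1})^\top$. This is a self-contained sketch that reuses the lecture's $\alpha$ values but a deliberately small $T$.

```python
import numpy as np

T, α_0, α_1, α_2 = 5, 10.0, 1.53, -0.9
y_neg1, y_0, σ_u = 28.0, 24.0, 1.0

# Build A and b as in the lecture's construction
A = np.identity(T)
for i in range(T):
    if i - 1 >= 0:
        A[i, i-1] = -α_1
    if i - 2 >= 0:
        A[i, i-2] = -α_2
b = np.full(T, α_0)
b[0] = α_0 + α_1 * y_0 + α_2 * y_neg1
b[1] = α_0 + α_2 * y_0

A_inv = np.linalg.inv(A)
μ_y = A_inv @ b
Σ_y = σ_u**2 * (A_inv @ A_inv.T)

# Monte Carlo: simulate many paths and compare sample moments with μ_y, Σ_y
rng = np.random.default_rng(0)
u = rng.normal(0, σ_u, size=(100_000, T))
ys = (A_inv @ (b + u).T).T

print(np.max(np.abs(ys.mean(axis=0) - μ_y)))           # close to zero
print(np.max(np.abs(np.cov(ys.T, bias=True) - Σ_y)))   # close to zero
```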
```{code-cell} ipython3 -# plot mean +# Plot mean N = 100 for i in range(N): col = cm.viridis(np.random.rand()) # Choose a random color from viridis - ys = my_process.sample_y(N) + ys = series_process.sample_y(N) plt.plot(ys[i,:], lw=0.5, color=col) - plt.plot(mu_y, color='red') + plt.plot(μ_y, color='red') plt.xlabel('t') plt.ylabel('y') @@ -478,41 +488,49 @@ Visually, notice how the variance across realizations of $y_t$ decreases as $t$ Let's plot the population variance of $y_t$ against $t$. ```{code-cell} ipython3 -# plot variance -plt.plot(Sigma_y.diagonal()) +# Plot variance +plt.plot(Σ_y.diagonal()) plt.show() ``` -Notice how the population variance increases and asymptotes +Notice how the population variance increases and asymptotes. +++ -Let's print out the covariance matrix $\Sigma_y$ for a time series $y$ +Let's print out the covariance matrix $\Sigma_y$ for a time series $y$. ```{code-cell} ipython3 -my_process = population_moments(alpha0=0, alpha1=.8, alpha2=0, T=6, y_1=0., y0=0., sigma_u=1) - -mu_y, Sigma_y = my_process.get_moments() -print("mu_y = ",mu_y) -print("Sigma_y = ", Sigma_y) +series_process = population_moments(α_0=0, + α_1=.8, + α_2=0, + T=6, + y_neg1=0., + y_0=0., + σ_u=1) + +μ_y, Σ_y = series_process.get_moments() +print("μ_y = ", μ_y) +print("Σ_y = \n", Σ_y) ``` -Notice that the covariance between $y_t$ and $y_{t-1}$ -- the elements on the superdiagonal -- are **not** identical. +Notice that the covariance between $y_t$ and $y_{t-1}$ -- the elements on the superdiagonal -- are *not* identical. -This is is an indication that the time series represented by our $y$ vector is not **stationary**. +This is an indication that the time series represented by our $y$ vector is not **stationary**. -To make it stationary, we'd have to alter our system so that our **initial conditions** $(y_1, y_0)$ are not fixed numbers but instead a jointly normally distributed random vector with a particular mean and covariance matrix. +To make it stationary, we'd have to alter our system so that our *initial conditions* $(y_0, y_{-1})$ are not fixed numbers but instead a jointly normally distributed random vector with a particular mean and covariance matrix. -We describe how to do that in another lecture in this lecture [Linear State Space Models](https://python.quantecon.org/linear_models.html). +We describe how to do that in [Linear State Space Models](https://python.quantecon.org/linear_models.html). But just to set the stage for that analysis, let's print out the bottom right corner of $\Sigma_y$. ```{code-cell} ipython3 -mu_y, Sigma_y = my_process.get_moments() -print("bottom right corner of Sigma_y = \n", Sigma_y[72:,72:]) +series_process = population_moments() +μ_y, Σ_y = series_process.get_moments() + +print("bottom right corner of Σ_y = \n", Σ_y[72:,72:]) ``` -Please notice how the sub diagonal and super diagonal elements seem to have converged. +Please notice how the subdiagonal and superdiagonal elements seem to have converged. This is an indication that our process is asymptotically stationary. @@ -522,7 +540,6 @@ There is a lot to be learned about the process by staring at the off diagonal el +++ - ## Moving average representation Let's print out $A^{-1}$ and stare at its structure @@ -531,36 +548,19 @@ Let's print out $A^{-1}$ and stare at its structure To study the structure of $A^{-1}$, we shall print just up to $3$ decimals. -Let's begin by printing out just the upper left hand corner of $A^{-1}$ +Let's begin by printing out just the upper left hand corner of $A^{-1}$. 
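To anticipate what the printout will show: since $y = A^{-1}(b + u)$, entry $(t, j)$ of $A^{-1}$ is the response of $y_t$ to a unit shock $u_j$, so each column of $A^{-1}$ holds an impulse response of the difference equation. The following self-contained sketch (reusing the lecture's $\alpha$ values, with variable names introduced here) verifies this for the first column:

```python
import numpy as np

T, α_1, α_2 = 10, 1.53, -0.9

# Build A and invert it, as in the lecture
A = np.identity(T)
for i in range(T):
    if i - 1 >= 0:
        A[i, i-1] = -α_1
    if i - 2 >= 0:
        A[i, i-2] = -α_2
A_inv = np.linalg.inv(A)

# Impulse response of y_t = α_1 y_{t-1} + α_2 y_{t-2} + u_t to a unit shock at t = 0
ψ = np.zeros(T)
ψ[0] = 1.0
ψ[1] = α_1
for t in range(2, T):
    ψ[t] = α_1 * ψ[t-1] + α_2 * ψ[t-2]

print(np.allclose(A_inv[:, 0], ψ))   # True: column 0 of A^{-1} is the impulse response
```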
```{code-cell} ipython3 -with np.printoptions(precision=3, suppress=True): - print(A_inv[0:7,0:7]) +print(A_inv[0:7,0:7]) ``` - - - Evidently, $A^{-1}$ is a lower triangular matrix. - -Let's print out the lower right hand corner of $A^{-1}$ and stare at it. - -```{code-cell} ipython3 -with np.printoptions(precision=3, suppress=True): - print(A_inv[72:,72:]) -``` - - Notice how every row ends with the previous row's pre-diagonal entries. - - - - Since $A^{-1}$ is lower triangular, each row represents $ y_t$ for a particular $t$ as the sum of - a time-dependent function $A^{-1} b$ of the initial conditions incorporated in $b$, and -- a weighted sum of current and past values of the IID shocks $\{u_t\}$ +- a weighted sum of current and past values of the IID shocks $\{u_t\}$. Thus, let $\tilde{A}=A^{-1}$. @@ -575,18 +575,15 @@ This is a **moving average** representation with time-varying coefficients. Just as system {eq}`eq:eqma` constitutes a **moving average** representation for $y$, system {eq}`eq:eqar` constitutes an **autoregressive** representation for $y$. - - - ## A forward looking model -Samuelson’s model is **backwards looking** in the sense that we give it **initial conditions** and let it +Samuelson’s model is *backward looking* in the sense that we give it *initial conditions* and let it run. -Let’s now turn to model that is **forward looking**. +Let’s now turn to model that is *forward looking*. -We apply similar linear algebra machinery to study a **perfect -foresight** model widely used as a benchmark in macroeconomics and +We apply similar linear algebra machinery to study a *perfect +foresight* model widely used as a benchmark in macroeconomics and finance. As an example, we suppose that $p_t$ is the price of a stock and @@ -599,7 +596,7 @@ $$ y = A^{-1} \left(b + u\right) $$ -Our **perfect foresight** model of stock prices is +Our *perfect foresight* model of stock prices is $$ p_{t} = \sum_{j=0}^{T-t} \beta^{j} y_{t+j}, \quad \beta \in (0,1) @@ -634,34 +631,34 @@ y_{T} \end{array}\right] $$ -```{code-cell} python3 -𝛽 = .96 +```{code-cell} ipython3 +β = .96 ``` -```{code-cell} python3 +```{code-cell} ipython3 # construct B B = np.zeros((T, T)) for i in range(T): - B[i, i:] = 𝛽 ** np.arange(0, T-i) + B[i, i:] = β ** np.arange(0, T-i) ``` -```{code-cell} python3 -B +```{code-cell} ipython3 +print(B) ``` -```{code-cell} python3 -𝜎u = 0. -u = np.random.normal(0, 𝜎u, size=T) +```{code-cell} ipython3 +σ_u = 0. +u = np.random.normal(0, σ_u, size=T) y = A_inv @ (b + u) y_steady = A_inv @ (b_steady + u) ``` -```{code-cell} python3 +```{code-cell} ipython3 p = B @ y ``` -```{code-cell} python3 +```{code-cell} ipython3 plt.plot(np.arange(0, T)+1, y, label='y') plt.plot(np.arange(0, T)+1, p, label='p') plt.xlabel('t') @@ -676,7 +673,7 @@ Can you explain why the trend of the price is downward over time? Also consider the case when $y_{0}$ and $y_{-1}$ are at the steady state. -```{code-cell} python3 +```{code-cell} ipython3 p_steady = B @ y_steady plt.plot(np.arange(0, T)+1, y_steady, label='y') @@ -687,4 +684,3 @@ plt.legend() plt.show() ``` - diff --git a/lectures/unpleasant.md b/lectures/unpleasant.md index 3e57c1bf..676edfa1 100644 --- a/lectures/unpleasant.md +++ b/lectures/unpleasant.md @@ -11,14 +11,14 @@ kernelspec: name: python3 --- -# Unpleasant Monetarist Arithmetic +# Some Unpleasant Monetarist Arithmetic ## Overview -This lecture builds on concepts and issues introduced in our lecture on **Money Supplies and Price Levels**. 
+This lecture builds on concepts and issues introduced in {doc}`money_inflation`. -That lecture describes stationary equilibria that reveal a **Laffer curve** in the inflation tax rate and the associated stationary rate of return +That lecture describes stationary equilibria that reveal a [*Laffer curve*](https://en.wikipedia.org/wiki/Laffer_curve) in the inflation tax rate and the associated stationary rate of return on currency. In this lecture we study a situation in which a stationary equilibrium prevails after date $T > 0$, but not before then. @@ -34,20 +34,18 @@ The critical **money-to-bonds** ratio stabilizes only at time $T$ and afterwards And the larger is $T$, the higher is the gross-of-interest government deficit that must be financed by printing money at times $t \geq T$. -These outcomes are the essential finding of Sargent and Wallace's **unpleasant monetarist arithmetic** {cite}`sargent1981`. - -**Reader's Guide:** Please read our lecture on Money Supplies and Price levels before diving into this lecture. +These outcomes are the essential finding of Sargent and Wallace's "unpleasant monetarist arithmetic" {cite}`sargent1981`. That lecture described supplies and demands for money that appear in lecture. It also characterized the steady state equilibrium from which we work backwards in this lecture. -In addition to learning about ''unpleasant monetarist arithmetic", in this lecture we'll learn how to implement a **fixed point** algorithm for computing an initial price level. +In addition to learning about "unpleasant monetarist arithmetic", in this lecture we'll learn how to implement a [*fixed point*](https://en.wikipedia.org/wiki/Fixed_point_(mathematics)) algorithm for computing an initial price level. ## Setup -Let's start with quick reminders of the model's components set out in our lecture on **Money Supplies and Price Levels**. +Let's start with quick reminders of the model's components set out in {doc}`money_inflation`. Please consult that lecture for more details and Python code that we'll also use in this lecture. @@ -75,9 +73,11 @@ $$ b_t = \gamma_1 - \gamma_2 R_t^{-1} . $$ (eq:up_bdemand) +where $\gamma_1 > \gamma_2 > 0$. + ## Monetary-Fiscal Policy -To the basic model of our lecture on **Money Supplies and Price Levels**, we add inflation-indexed one-period government bonds as an additional way for the government to finance government expenditures. +To the basic model of {doc}`money_inflation`, we add inflation-indexed one-period government bonds as an additional way for the government to finance government expenditures. Let $\widetilde R > 1$ be a time-invariant gross real rate of return on government one-period inflation-indexed bonds. @@ -91,13 +91,13 @@ $$ Just before the beginning of time $0$, the public owns $\check m_0$ units of currency (measured in dollars) and $\widetilde R \check B_{-1}$ units of one-period indexed bonds (measured in time $0$ goods); these two quantities are initial conditions set outside the model. -Notice that $\check m_0$ is a **nominal** quantity, being measured in dollar, while -$\widetilde R \check B_{-1}$ is a **real** quantity, being measured in time $0$ goods. +Notice that $\check m_0$ is a *nominal* quantity, being measured in dollars, while +$\widetilde R \check B_{-1}$ is a *real* quantity, being measured in time $0$ goods. 
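Before turning to open market operations, here is a quick look at the demand schedule {eq}`eq:up_bdemand`: demand is positive only when $R > \gamma_2/\gamma_1$ and rises toward $\gamma_1$ as $R$ grows. The sketch below is illustrative only; it borrows the values $\gamma_1 = 100$ and $\gamma_2 = 50$ used later in the lecture.

```python
import numpy as np
import matplotlib.pyplot as plt

γ_1, γ_2 = 100, 50                          # values used later in the lecture
R = np.linspace(γ_2 / γ_1 + 0.01, 1.0, 100)
b = γ_1 - γ_2 / R                           # the demand schedule (eq:up_bdemand)

fig, ax = plt.subplots()
ax.plot(R, b)
ax.set_xlabel('$R$')
ax.set_ylabel('demand')
plt.show()
```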
### Open market operations -At time $0$, government can rearrange its portolio of debts with subject to the following constraint (on open-market operations): +At time $0$, government can rearrange its portfolio of debts subject to the following constraint (on open-market operations): $$ \widetilde R B_{-1} + \frac{m_0}{p_0} = \widetilde R \check B_{-1} + \frac{\check m_0}{p_0} @@ -109,14 +109,14 @@ $$ B_{-1} - \check B_{-1} = \frac{1}{p_0 \widetilde R} \left( \check m_0 - m_0 \right) $$ (eq:openmarketconstraint) -This equation says that the government (e.g., the central bank) can **decrease** $m_0$ relative to -$\check m_0$ by **increasing** $B_{-1}$ relative to $\check B_{-1}$. +This equation says that the government (e.g., the central bank) can *decrease* $m_0$ relative to +$\check m_0$ by *increasing* $B_{-1}$ relative to $\check B_{-1}$. -This is a version of a standard constraint on a central bank's **open market operations** in which it expands the stock of money by buying government bonds from the public. +This is a version of a standard constraint on a central bank's [**open market operations**](https://www.federalreserve.gov/monetarypolicy/openmarket.htm) in which it expands the stock of money by buying government bonds from the public. ## An open market operation at $t=0$ -Following Sargent and Wallace (1981), we analyze consequences of a central bank policy that +Following Sargent and Wallace {cite}`sargent1981`, we analyze consequences of a central bank policy that uses an open market operation to lower the price level in the face of a persistent fiscal deficit that takes the form of a positive $g$. @@ -152,7 +152,7 @@ running monetary and fiscal policies. Here, by **fiscal policy** we mean the collection of actions that determine a sequence of net-of-interest government deficits $\{g_t\}_{t=0}^\infty$ that must be financed by issuing to the public either money or interest bearing bonds. -By **monetary policy** or **debt-management polcy**, we mean the collection of actions that determine how the government divides its portolio of debts to the public between interest-bearing parts (government bonds) and non-interest-bearing parts (money). +By **monetary policy** or **debt-management policy**, we mean the collection of actions that determine how the government divides its portfolio of debts to the public between interest-bearing parts (government bonds) and non-interest-bearing parts (money). By an **open market operation**, we mean a government monetary policy action in which the government (or its delegate, say, a central bank) either buys government bonds from the public for newly issued money, or sells bonds to the public and withdraws the money it receives from public circulation. @@ -160,7 +160,7 @@ By an **open market operation**, we mean a government monetary policy action in ## Algorithm (basic idea) -We work backwards from $t=T$ and first compute $p_T, R_u$ associated with the low-inflation, low-inflation-tax-rate stationary equilibrium of our lecture on the dynamic Laffer curve for the inflation tax. +We work backwards from $t=T$ and first compute $p_T, R_u$ associated with the low-inflation, low-inflation-tax-rate stationary equilibrium in {doc}`money_inflation_nonlinear`. To start our description of our algorithm, it is useful to recall that a stationary rate of return on currency $\bar R$ solves the quadratic equation @@ -171,7 +171,7 @@ $$ (eq:up_steadyquadratic) Quadratic equation {eq}`eq:up_steadyquadratic` has two roots, $R_l < R_u < 1$. 
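As a concrete illustration, the two roots can be computed numerically. The sketch below borrows $\gamma_1 = 100$ and $\gamma_2 = 50$ from later in the lecture and, purely for illustration, treats $\overline g$ as if it equaled $g = 3$:

```python
import numpy as np

γ_1, γ_2, g_bar = 100, 50, 3.0   # illustrative values

# Roots of -γ_1 R² + (γ_1 + γ_2 - ḡ) R - γ_2 = 0
roots = np.roots([-γ_1, γ_1 + γ_2 - g_bar, -γ_2]).real
R_l, R_u = np.sort(roots)
print(R_l, R_u)                  # roughly 0.534 and 0.936, both below 1
```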
-For reasons described at the end of **this lecture**, we select the larger root $R_u$. +For reasons described at the end of {doc}`money_inflation`, we select the larger root $R_u$. Next, we compute @@ -211,7 +211,7 @@ We want to compute $$ \begin{aligned} -p_0 & = \gamma_1^{-1} \left[ \sum_{j=0}^\infty \lambda^j m_{1+j} \right] \cr +p_0 & = \gamma_1^{-1} \left[ \sum_{j=0}^\infty \lambda^j m_{j} \right] \cr & = \gamma_1^{-1} \left[ \sum_{j=0}^{T-1} \lambda^j m_{0} + \sum_{j=T}^\infty \lambda^j m_{1+j} \right] \end{aligned} $$ @@ -240,21 +240,23 @@ $$ p_T = \frac{m_0}{\gamma_1 - \overline g - \gamma_2 R_u^{-1}} = \gamma_1^{-1} m_0 \left\{\frac{1}{R_u-\lambda} \right\} $$ (eq:pTformula) -**Remark:** +```{prf:remark} We can verify the equivalence of the two formulas on the right sides of {eq}`eq:pTformula` by recalling that $R_u$ is a root of the quadratic equation {eq}`eq:up_steadyquadratic` that determines steady state rates of return on currency. +``` ## Algorithm (pseudo code) Now let's describe a computational algorithm in more detail in the form of a description -that constitutes ''pseudo code'' because it approaches a set of instructions we could provide to a +that constitutes pseudo code because it approaches a set of instructions we could provide to a Python coder. To compute an equilibrium, we deploy the following algorithm. -Given **parameters** include $g, \check m_0, \check B_{-1}, \widetilde R >1, T $ +```{prf:algorithm} +Given *parameters* include $g, \check m_0, \check B_{-1}, \widetilde R >1, T $. -We define a mapping from $p_0$ to $p_0$ as follows. +We define a mapping from $p_0$ to $\widehat p_0$ as follows. * Set $m_0$ and then compute $B_{-1}$ to satisfy the constraint on time $0$ **open market operations** @@ -278,8 +280,7 @@ $$ * Compute $R_u, p_T$ from formulas {eq}`eq:up_steadyquadratic` and {eq}`eq:LafferTstationary` above -* Compute a new estimate of $p_0$, call it $\widehat p_0$, from equation {eq}`eq:allts` above - +* Compute a new estimate of $p_0$, call it $\widehat p_0$, from equation {eq}`eq:allts` above * Note that the preceding steps define a mapping @@ -296,12 +297,12 @@ p_{0,j+1} = (1-\theta) {\mathcal S}(p_{0,j}) + \theta p_{0,j}, $$ where $\theta \in [0,1)$ is a relaxation parameter. - +``` ## Example Calculations We'll set parameters of the model so that the steady state after time $T$ is initially the same -as in our lecture on "Money and Inflation". +as in {doc}`money_inflation_nonlinear` In particular, we set $\gamma_1=100, \gamma_2 =50, g=3.0$. We set $m_0 = 100$ in that lecture, but now the counterpart will be $M_T$, which is endogenous. @@ -314,9 +315,9 @@ These parameter settings mean that just before time $0$, the "central bank" sell That leaves the public with less currency but more government interest-bearing bonds. -Since the public has less currency (it's supply has diminished) it is plausible to anticipate that the price level at time $0$ will be driven downward. +Since the public has less currency (its supply has diminished) it is plausible to anticipate that the price level at time $0$ will be driven downward. -But that is not the end of the story, because this ''open market operation'' at time $0$ has consequences for future settings of $m_{t+1}$ and the gross-of-interest government deficit $\bar g_t$. +But that is not the end of the story, because this **open market operation** at time $0$ has consequences for future settings of $m_{t+1}$ and the gross-of-interest government deficit $\bar g_t$. 
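Before looking at the lecture's implementation, here is a minimal generic sketch of the damped iteration $p_{0,j+1} = (1-\theta) {\mathcal S}(p_{0,j}) + \theta p_{0,j}$ described above; the map `S` in the example call is a stand-in toy function, not the lecture's mapping ${\mathcal S}$.

```python
def damped_fixed_point(S, x0, θ=0.5, tol=1e-6, max_iter=1_000):
    """Iterate x_{j+1} = (1 - θ) S(x_j) + θ x_j until successive iterates converge."""
    x = x0
    for _ in range(max_iter):
        x_new = (1 - θ) * S(x) + θ * x
        if abs(x_new - x) < tol:
            return x_new
        x = x_new
    raise ValueError("iteration did not converge")

# Toy example: S(x) = 0.5 x + 1 has unique fixed point x = 2
print(damped_fixed_point(lambda x: 0.5 * x + 1, x0=0.0))
```

The `compute_fixed_point` function defined below applies the same relaxation idea with the model-based mapping.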
Let's start with some imports: @@ -327,7 +328,7 @@ import matplotlib.pyplot as plt from collections import namedtuple ``` -Now let's dive in and implement our ''pseudo code'' in Python. +Now let's dive in and implement our pseudo code in Python. ```{code-cell} ipython3 # Create a namedtuple that contains parameters @@ -393,14 +394,15 @@ def compute_fixed_point(m0, p0_guess, model, θ=0.5, tol=1e-6): return p0 ``` + Let's look at how price level $p_0$ in the stationary $R_u$ equilibrium depends on the initial money supply $m_0$. Notice that the slope of $p_0$ as a function of $m_0$ is constant. -This outcome indicates that our model verifies a ''quantity theory of money'' outcome, +This outcome indicates that our model verifies a quantity theory of money outcome, something that Sargent and Wallace {cite}`sargent1981` purposefully built into their model to justify -the adjective **monetarist** in their title. +the adjective *monetarist* in their title. ```{code-cell} ipython3 @@ -416,7 +418,7 @@ plt.xlabel('$m_0$') plt.show() ``` -Now let's write and implement code that let's us experiment with the time $0$ open market operation described earlier. +Now let's write and implement code that lets us experiment with the time $0$ open market operation described earlier. ```{code-cell} ipython3 def simulate(m0, model, length=15, p0_guess=1): @@ -469,25 +471,16 @@ def simulate(m0, model, length=15, p0_guess=1): def plot_path(m0_arr, model, length=15): fig, axs = plt.subplots(2, 2, figsize=(8, 5)) - + titles = ['$p_t$', '$m_t$', '$b_t$', '$R_t$'] + for m0 in m0_arr: - paths = simulate(m0, msm, length=length) - - axs[0, 0].plot(paths[0]) - axs[0, 0].set_title('$p_t$') - - axs[0, 1].plot(paths[1]) - axs[0, 1].set_title('$m_t$') - - axs[1, 0].plot(paths[2]) - axs[1, 0].set_title('$b_t$') - - axs[1, 1].plot(paths[3]) - axs[1, 1].set_title('$R_t$') - - axs[0, 1].hlines(model.m0_check, 0, length, - color='r', linestyle='--') - axs[0, 1].text(length*0.8, model.m0_check*0.9, '$\check{m}_0$') + paths = simulate(m0, model, length=length) + for i, ax in enumerate(axs.flat): + ax.plot(paths[i]) + ax.set_title(titles[i]) + + axs[0, 1].hlines(model.m0_check, 0, length, color='r', linestyle='--') + axs[0, 1].text(length * 0.8, model.m0_check * 0.9, '$\check{m}_0$') plt.show() ``` @@ -501,11 +494,12 @@ mystnb: plot_path([80, 100], msm) ``` -Figure {numref}`fig:unpl1` summarizes outcomes of two experiments that convey messages of -Sargent and Wallace's **unpleasant monetarist arithmetic** {cite}`sargent1981`. +{numref}`fig:unpl1` summarizes outcomes of two experiments that convey messages of Sargent and Wallace {cite}`sargent1981`. * An open market operation that reduces the supply of money at time $t=0$ reduces the price level at time $t=0$ * The lower is the post-open-market-operation money supply at time $0$, lower is the price level at time $0$. -* An open market operation that reduces the post-open-market-operation money supply at time $0$ also **lowers** the rate of return on money $R_u$ at times $t \geq T$ because it brings a higher gross-of-interest government deficit that must be financed by printing money (i.e., levying an inflation tax) at time $t \geq T$. +* An open market operation that reduces the post open market operation money supply at time $0$ also *lowers* the rate of return on money $R_u$ at times $t \geq T$ because it brings a higher gross of interest government deficit that must be financed by printing money (i.e., levying an inflation tax) at time $t \geq T$. 
+
+* $R$ matters for maintaining monetary stability when government deficits push up inflation; a larger $R$ can be chosen to offset the erosion of the real rate of return that the resulting inflation causes.