From f4cb3f72ad1fe326c29826a02f42ca2df023691b Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Tue, 19 Aug 2025 21:27:34 +1000 Subject: [PATCH 01/14] update exercises --- lectures/likelihood_ratio_process_2.md | 350 ++++++++++++++++++++++++- 1 file changed, 349 insertions(+), 1 deletion(-) diff --git a/lectures/likelihood_ratio_process_2.md b/lectures/likelihood_ratio_process_2.md index 0fad9ebd9..922d7f4de 100644 --- a/lectures/likelihood_ratio_process_2.md +++ b/lectures/likelihood_ratio_process_2.md @@ -4,7 +4,7 @@ jupytext: extension: .md format_name: myst format_version: 0.13 - jupytext_version: 1.17.1 + jupytext_version: 1.17.2 kernelspec: display_name: Python 3 (ipykernel) language: python @@ -928,3 +928,351 @@ $$ ```{solution-end} ``` + +```{exercise} +:label: lr_ex4 + +In this exercise, we will implement the Blume-Easley model with learning agents. + +Consider the two models + +$$ +f(s^t) = f(s_1) f(s_2) \cdots f(s_t) +$$ + +and + +$$ +g(s^t) = g(s_1) g(s_2) \cdots g(s_t) +$$ + +and associated likelihood ratio process + +$$ +L(s^t) = \frac{f(s^t)}{g(s^t)} +$$ + +Let $\pi_0 \in (0,1)$ be a prior probability and + +$$ +\pi_t = \frac{ \pi_0 L(s^t)}{ \pi_0 L(s^t) + (1-\pi_0) } +$$ + +Now consider the mixture model + +$$ +m(s^t) = \pi(s^t) f(s^t) + (1- \pi(s^t)) g(s^t) +$$ (eq:be_mix_model) + +Now consider the environment in our Blume-Easley lecture. + +We'll endow each type of consumer with model {eq}`eq:be_mix_model`. + + * The two agents share the same $f$ and $g$, but + * they have different initial priors, say $\pi_0^1$ and $\pi_0^2$ + +Thus, consumer $i$'s probability model is + +$$ +m^i(s^t) = \pi^i(s^t) f(s^t) + (1- \pi^i(s^t)) g(s^t) \tag{4} +$$ + +The idea is to hand probability models (4) for $i=1,2$ to the social planner in the Blume-Easley lecture, deduce allocation $c^i(s^t), i = 1,2$, and watch what happens when + + * nature's model is $f$ + * nature's model is $g$ + +Both consumers will eventually learn the "truth", but one of them will learn faster. + +Questions: +1. How do their consumption shares evolve? +2. Which agent learns faster when nature follows $f$? When nature follows $g$? +3. How does the difference in initial priors $\pi_0^1$ and $\pi_0^2$ affect the convergence speed? + +In the exercise below, set $f \sim \text{Beta}(1.5, 1)$ and $g \sim \text{Beta}(1, 1.5)$. + +``` + +```{solution-start} lr_ex4 +:class: dropdown +``` + +Here is one solution. + +First, let's set up the model with learning agents: + +```{code-cell} ipython3 +def bayesian_update(π_0, L_t): + """ + Bayesian update of belief probability given likelihood ratio. + """ + return (π_0 * L_t) / (π_0 * L_t + (1 - π_0)) + +def mixture_density_belief(s_seq, f_func, g_func, π_seq): + """ + Compute the mixture density beliefs m^i(s^t) for agent i. + """ + f_vals = f_func(s_seq) + g_vals = g_func(s_seq) + return π_seq * f_vals + (1 - π_seq) * g_vals +``` + +Now let's implement the learning Blume-Easley simulation: + +```{code-cell} ipython3 +def simulate_learning_blume_easley(sequences, f_belief, g_belief, + π_0_1, π_0_2, λ=0.5): + """ + Simulate Blume-Easley model with learning agents. 
+ """ + N, T = sequences.shape + + # Initialize arrays to store results + π_1_seq = np.empty((N, T)) + π_2_seq = np.empty((N, T)) + c1_share = np.empty((N, T)) + l_agents_seq = np.empty((N, T)) + + π_1_seq[:, 0] = π_0_1 + π_2_seq[:, 0] = π_0_2 + + for n in range(N): + # Initialize cumulative likelihood ratio for beliefs + L_cumul = 1.0 + + # Initialize likelihood ratio between agent densities + l_agents_cumul = 1.0 + + for t in range(1, T): + s_t = sequences[n, t] + + # Compute likelihood ratio for this observation + l_t = f_belief(s_t) / g_belief(s_t) + + # Update cumulative likelihood ratio + L_cumul *= l_t + + # Bayesian update of beliefs + π_1_t = bayesian_update(π_0_1, L_cumul) + π_2_t = bayesian_update(π_0_2, L_cumul) + + # Store beliefs + π_1_seq[n, t] = π_1_t + π_2_seq[n, t] = π_2_t + + # Compute mixture densities for each agent + m1_t = π_1_t * f_belief(s_t) + (1 - π_1_t) * g_belief(s_t) + m2_t = π_2_t * f_belief(s_t) + (1 - π_2_t) * g_belief(s_t) + + # Update cumulative likelihood ratio between agents + l_agents_cumul *= (m1_t / m2_t) + l_agents_seq[n, t] = l_agents_cumul + + # c_t^1(s^t) = λ * l_t(s^t) / (1 - λ + λ * l_t(s^t)) + # where l_t(s^t) is the cumulative likelihood ratio between agents + c1_share[n, t] = λ * l_agents_cumul / (1 - λ + λ * l_agents_cumul) + + return { + 'π_1': π_1_seq, + 'π_2': π_2_seq, + 'c1_share': c1_share, + 'l_agents': l_agents_seq + } +``` + +Let's run simulations for different scenarios. + +We use $\lambda = 0.5$, $T=40$, and $N=1000$. + +```{code-cell} ipython3 +λ = 0.5 +T = 40 +N = 1000 + +F_a, F_b = 1.5, 1 +G_a, G_b = 1, 1.5 + +f = jit(lambda x: p(x, F_a, F_b)) +g = jit(lambda x: p(x, G_a, G_b)) +``` + +We start the $\pi^i_0 \in (0, 1)$ from different starting points and widen the gap + +```{code-cell} ipython3 +# Different initial priors +π_0_scenarios = [ + (0.3, 0.7), + (0.7, 0.3), + (0.1, 0.9), +] +``` + +Now we can run simulations for different scenarios + +```{code-cell} ipython3 +# Nature follows f +s_seq_f = np.random.beta(F_a, F_b, (N, T)) + +# Nature follows g +s_seq_g = np.random.beta(G_a, G_b, (N, T)) + +results_f = {} +results_g = {} + +for i, (π_0_1, π_0_2) in enumerate(π_0_scenarios): + # When nature follows f + results_f[i] = simulate_learning_blume_easley( + s_seq_f, f, g, π_0_1, π_0_2, λ) + # When nature follows g + results_g[i] = simulate_learning_blume_easley( + s_seq_g, f, g, π_0_1, π_0_2, λ) +``` + +Now let's visualize the results + +```{code-cell} ipython3 +def plot_learning_results(results, π_0_scenarios, nature_type, truth_value): + """ + Plot beliefs and consumption shares for learning agents. 
+ """ + + fig, axes = plt.subplots(3, 2, figsize=(10, 15)) + + scenario_labels = [ + rf'$\pi_0^1 = {π_0_1}, \pi_0^2 = {π_0_2}$' + for π_0_1, π_0_2 in π_0_scenarios + ] + + for row, (scenario_idx, scenario_label) in enumerate( + zip(range(3), scenario_labels)): + + res = results[scenario_idx] + + # Plot beliefs + ax = axes[row, 0] + π_1_med = np.median(res['π_1'], axis=0) + π_2_med = np.median(res['π_2'], axis=0) + ax.plot(π_1_med, 'C0', label=r'$\pi_1^t$ (agent 1)', linewidth=2) + ax.plot(π_2_med, 'C1', label=r'$\pi_2^t$ (agent 2)', linewidth=2) + ax.axhline(y=truth_value, color='gray', linestyle='--', + alpha=0.5, label=f'truth ({nature_type})') + ax.set_title(f'beliefs when nature = {nature_type}\n{scenario_label}') + ax.set_ylabel('belief probability') + ax.set_ylim([-0.05, 1.05]) + ax.legend() + + # Plot consumption shares + ax = axes[row, 1] + c1_med = np.median(res['c1_share'], axis=0) + ax.plot(c1_med, 'g-', linewidth=2, label='agent 1 consumption share') + ax.axhline(y=0.5, color='gray', linestyle='--', + alpha=0.5, label='equal split') + ax.set_title(f'consumption when nature = {nature_type}') + ax.set_ylabel('agent 1 share') + ax.set_ylim([0, 1]) + ax.legend() + + # Add x-labels + for col in range(2): + axes[row, col].set_xlabel('time') + + plt.tight_layout() + return fig, axes +``` + +Now use the function to plot results when nature follows f: + +```{code-cell} ipython3 +fig_f, axes_f = plot_learning_results( + results_f, π_0_scenarios, 'f', 1.0) +plt.show() +``` + +We can see that the agent with more "accurate" belief gets higher consumption share. + +Moreover, the further the initial beliefs are, the longer it takes for the consumption ratio to converge. + +The time it takes for the "less accurate" agent costs their share in future consumption. + +Now plot results when nature follows g: + +```{code-cell} ipython3 +fig_g, axes_g = plot_learning_results(results_g, π_0_scenarios, 'g', 0.0) +plt.show() +``` + +We observe a similar but symmetrical pattern. + +```{solution-end} +``` + +```{exercise} +:label: lr_ex5 + +In the previous exercise, we specifically set the two beta distributions to be relatively close to each other. + +That is to say, it is harder to distinguish between the two distributions. + +Now let's explore an alternative scenario where the two distributions are further apart. + +Specifically, we set $f \sim \text{Beta}(2, 5)$ and $g \sim \text{Beta}(5, 2)$. + +Try to compare the learning dynamics in this scenario with the previous one using the simulation code we developed earlier. 
+``` + +```{solution-start} lr_ex5 +:class: dropdown +``` + +Here is one solution + +```{code-cell} ipython3 +λ = 0.5 +T = 40 +N = 1000 + +F_a, F_b = 2, 5 +G_a, G_b = 5, 2 + +f = jit(lambda x: p(x, F_a, F_b)) +g = jit(lambda x: p(x, G_a, G_b)) + +π_0_scenarios = [ + (0.3, 0.7), + (0.7, 0.3), + (0.1, 0.9), +] + +s_seq_f = np.random.beta(F_a, F_b, (N, T)) +s_seq_g = np.random.beta(G_a, G_b, (N, T)) + +results_f = {} +results_g = {} + +for i, (π_0_1, π_0_2) in enumerate(π_0_scenarios): + # When nature follows f + results_f[i] = simulate_learning_blume_easley( + s_seq_f, f, g, π_0_1, π_0_2, λ) + # When nature follows g + results_g[i] = simulate_learning_blume_easley( + s_seq_g, f, g, π_0_1, π_0_2, λ) +``` + +Now let's visualize the results + +```{code-cell} ipython3 +fig_f, axes_f = plot_learning_results(results_f, π_0_scenarios, 'f', 1.0) +plt.show() +``` + +```{code-cell} ipython3 +fig_g, axes_g = plot_learning_results(results_g, π_0_scenarios, 'g', 0.0) +plt.show() +``` + +In this case, it is easier to realize one's belief is incorrect, the belief adjust more quickly. + +Observe that consumption shares also adjust more quickly. + +```{solution-end} +``` From 51d998b13c1afb45d2580026143e217893ee0efa Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Tue, 19 Aug 2025 21:34:53 +1000 Subject: [PATCH 02/14] updates --- lectures/likelihood_ratio_process_2.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lectures/likelihood_ratio_process_2.md b/lectures/likelihood_ratio_process_2.md index 922d7f4de..04e4cdb4f 100644 --- a/lectures/likelihood_ratio_process_2.md +++ b/lectures/likelihood_ratio_process_2.md @@ -958,13 +958,13 @@ $$ \pi_t = \frac{ \pi_0 L(s^t)}{ \pi_0 L(s^t) + (1-\pi_0) } $$ -Now consider the mixture model +and the mixture model $$ -m(s^t) = \pi(s^t) f(s^t) + (1- \pi(s^t)) g(s^t) +m(s^t) = \pi_t f(s^t) + (1- \pi_t) g(s^t) $$ (eq:be_mix_model) -Now consider the environment in our Blume-Easley lecture. +Now consider them in the environment in our Blume-Easley lecture. We'll endow each type of consumer with model {eq}`eq:be_mix_model`. @@ -974,10 +974,10 @@ We'll endow each type of consumer with model {eq}`eq:be_mix_model`. 
Thus, consumer $i$'s probability model is $$ -m^i(s^t) = \pi^i(s^t) f(s^t) + (1- \pi^i(s^t)) g(s^t) \tag{4} -$$ +m^i(s^t) = \pi^i_t f(s^t) + (1- \pi^i_t) g(s^t) +$$ (eq:prob_model) -The idea is to hand probability models (4) for $i=1,2$ to the social planner in the Blume-Easley lecture, deduce allocation $c^i(s^t), i = 1,2$, and watch what happens when +The idea is to hand probability models {eq}`eq:prob_model` for $i=1,2$ to the social planner in the Blume-Easley lecture, deduce allocation $c^i(s^t), i = 1,2$, and watch what happens when * nature's model is $f$ * nature's model is $g$ From 43a0390066e04207252c078daa52592274c4f8ee Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Tue, 19 Aug 2025 21:39:49 +1000 Subject: [PATCH 03/14] updates --- lectures/likelihood_ratio_process_2.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lectures/likelihood_ratio_process_2.md b/lectures/likelihood_ratio_process_2.md index 04e4cdb4f..af1a67a1e 100644 --- a/lectures/likelihood_ratio_process_2.md +++ b/lectures/likelihood_ratio_process_2.md @@ -1028,11 +1028,11 @@ def simulate_learning_blume_easley(sequences, f_belief, g_belief, N, T = sequences.shape # Initialize arrays to store results - π_1_seq = np.empty((N, T)) - π_2_seq = np.empty((N, T)) - c1_share = np.empty((N, T)) - l_agents_seq = np.empty((N, T)) - + π_1_seq = np.full((N, T), np.nan) + π_2_seq = np.full((N, T), np.nan) + c1_share = np.full((N, T), np.nan) + l_agents_seq = np.full((N, T), np.nan) + π_1_seq[:, 0] = π_0_1 π_2_seq[:, 0] = π_0_2 @@ -1096,7 +1096,7 @@ f = jit(lambda x: p(x, F_a, F_b)) g = jit(lambda x: p(x, G_a, G_b)) ``` -We start the $\pi^i_0 \in (0, 1)$ from different starting points and widen the gap +We start with different initial priors $\pi^i_0 \in (0, 1)$ and widen the gap between them. ```{code-cell} ipython3 # Different initial priors @@ -1270,7 +1270,7 @@ fig_g, axes_g = plot_learning_results(results_g, π_0_scenarios, 'g', 0.0) plt.show() ``` -In this case, it is easier to realize one's belief is incorrect, the belief adjust more quickly. +In this case, it is easier to realize one's belief is incorrect; the belief adjusts more quickly. Observe that consumption shares also adjust more quickly. From 122a708df96eae77e1c23c82205da3106081e6ad Mon Sep 17 00:00:00 2001 From: thomassargent30 Date: Wed, 20 Aug 2025 12:42:13 -0400 Subject: [PATCH 04/14] Tom's edits of Blume-Easley lecture and bib file --- lectures/_static/quant-econ.bib | 10 +++ lectures/likelihood_ratio_process_2.md | 96 +++++++++++++++----------- 2 files changed, 67 insertions(+), 39 deletions(-) diff --git a/lectures/_static/quant-econ.bib b/lectures/_static/quant-econ.bib index 7e02441fe..0fd46856d 100644 --- a/lectures/_static/quant-econ.bib +++ b/lectures/_static/quant-econ.bib @@ -3,6 +3,16 @@ Note: Extended Information (like abstracts, doi, url's etc.) 
can be found in quant-econ-extendedinfo.bib file in _static/ ### +@article{blume2018case, + title={A case for incomplete markets}, + author={Blume, Lawrence E and Cogley, Timothy and Easley, David A and Sargent, Thomas J and Tsyrennikov, Viktor}, + journal={Journal of Economic Theory}, + volume={178}, + pages={191--221}, + year={2018}, + publisher={Elsevier} +} + @article{shannon1948mathematical, title={A mathematical theory of communication}, author={Shannon, Claude E}, diff --git a/lectures/likelihood_ratio_process_2.md b/lectures/likelihood_ratio_process_2.md index af1a67a1e..7a5354500 100644 --- a/lectures/likelihood_ratio_process_2.md +++ b/lectures/likelihood_ratio_process_2.md @@ -52,7 +52,7 @@ We'll study two alternative arrangements: The fundamental theorems of welfare economics will apply and assure us that these two arrangements end up producing exactly the same allocation of consumption goods to individuals **provided** that the social planner assigns an appropriate set of **Pareto weights**. ```{note} -You can learn about how the two welfare theorems are applied in modern macroeconomic models in {doc}`this lecture on a planning problem ` and {doc}`this lecture on a related competitive equilibrium `. +You can learn about how the two welfare theorems are applied in modern macroeconomic models in {doc}`this lecture on a planning problem ` and {doc}`this lecture on a related competitive equilibrium `. {doc}`This quantecon lecture ` presents a recursive formulation of complete markets models with homogeneous beliefs. ``` @@ -830,13 +830,22 @@ This ties in nicely with {eq}`eq:kl_likelihood_link`. ## Related Lectures -Likelihood processes play an important role in Bayesian learning, as described in {doc}`likelihood_bayes` -and as applied in {doc}`odu`. +Complete markets models with homogeneous beliefs, a kind often used in macroeconomics and finance, are studied in this quantecon lecture {doc}`ge_arrow`. + +{cite}`blume2018case` discuss a paternalistic case against complete markets. Their analysis assumes that a social planner should disregard individuals preferences in the sense that it should disregard the subjective belief components of their preferences. + +Likelihood processes play an important role in Bayesian learning, as described in {doc}`likelihood_bayes` and as applied in {doc}`odu`. Likelihood ratio processes appear again in {doc}`advanced:additive_functionals`. -## Exercise + +{doc}`ge_arrow` + + + + +## Exercises ```{exercise} :label: lr_ex3 @@ -892,7 +901,7 @@ $$ c_t^1(s^t) = \frac{\lambda l_t(s^t)}{1 - \lambda + \lambda l_t(s^t)} $$ -To match them, we need the following equality to hold +To match agent 1's choice in a competitive equilibrium with the planner's choice for agent 1, the following equality must hold $$ \frac{\mu_2}{\mu_1} = \frac{\lambda}{1 - \lambda} @@ -932,9 +941,12 @@ $$ ```{exercise} :label: lr_ex4 -In this exercise, we will implement the Blume-Easley model with learning agents. +In this exercise, we'll study two agents, each of whom updates its posterior probability as +data arrive. + + * each agent applies Bayes' law in the way studied in {doc}`likelihood_bayes`. -Consider the two models +The following two models are on the table $$ f(s^t) = f(s_1) f(s_2) \cdots f(s_t) @@ -943,29 +955,27 @@ $$ and $$ -g(s^t) = g(s_1) g(s_2) \cdots g(s_t) +g(s^t) = g(s_1) g(s_2) \cdots g(s_t) $$ -and associated likelihood ratio process +as is an associated likelihood ratio process $$ -L(s^t) = \frac{f(s^t)}{g(s^t)} +L(s^t) = \frac{f(s^t)}{g(s^t)} . 
$$ Let $\pi_0 \in (0,1)$ be a prior probability and $$ -\pi_t = \frac{ \pi_0 L(s^t)}{ \pi_0 L(s^t) + (1-\pi_0) } +\pi_t = \frac{ \pi_0 L(s^t)}{ \pi_0 L(s^t) + (1-\pi_0) } . $$ -and the mixture model +Each of our two agents deploys its own version of the mixture model $$ m(s^t) = \pi_t f(s^t) + (1- \pi_t) g(s^t) $$ (eq:be_mix_model) -Now consider them in the environment in our Blume-Easley lecture. - We'll endow each type of consumer with model {eq}`eq:be_mix_model`. * The two agents share the same $f$ and $g$, but @@ -977,19 +987,25 @@ $$ m^i(s^t) = \pi^i_t f(s^t) + (1- \pi^i_t) g(s^t) $$ (eq:prob_model) -The idea is to hand probability models {eq}`eq:prob_model` for $i=1,2$ to the social planner in the Blume-Easley lecture, deduce allocation $c^i(s^t), i = 1,2$, and watch what happens when +We now hand probability models {eq}`eq:prob_model` for $i=1,2$ to the social planner. + +We want to deduce allocation $c^i(s^t), i = 1,2$, and watch what happens when * nature's model is $f$ * nature's model is $g$ -Both consumers will eventually learn the "truth", but one of them will learn faster. +We expect that consumers will eventually learn the "truth", but that one of them will learn faster. + +To explore things, please set $f \sim \text{Beta}(1.5, 1)$ and $g \sim \text{Beta}(1, 1.5)$. + +Please write Python code that answers the following questions. + + * How do consumption shares evolve? + * Which agent learns faster when nature follows $f$? + * Which agent learns faster when nature follows $g$? + * How does a difference in initial priors $\pi_0^1$ and $\pi_0^2$ affect the convergence speed? -Questions: -1. How do their consumption shares evolve? -2. Which agent learns faster when nature follows $f$? When nature follows $g$? -3. How does the difference in initial priors $\pi_0^1$ and $\pi_0^2$ affect the convergence speed? -In the exercise below, set $f \sim \text{Beta}(1.5, 1)$ and $g \sim \text{Beta}(1, 1.5)$. ``` @@ -997,9 +1013,9 @@ In the exercise below, set $f \sim \text{Beta}(1.5, 1)$ and $g \sim \text{Beta}( :class: dropdown ``` -Here is one solution. -First, let's set up the model with learning agents: + +First, let's write helper functions that compute model components including each agent's subjective belief function. ```{code-cell} ipython3 def bayesian_update(π_0, L_t): @@ -1017,7 +1033,7 @@ def mixture_density_belief(s_seq, f_func, g_func, π_seq): return π_seq * f_vals + (1 - π_seq) * g_vals ``` -Now let's implement the learning Blume-Easley simulation: +Now let's write code that simulates the Blume-Easley model with our two agents. ```{code-cell} ipython3 def simulate_learning_blume_easley(sequences, f_belief, g_belief, @@ -1096,7 +1112,7 @@ f = jit(lambda x: p(x, F_a, F_b)) g = jit(lambda x: p(x, G_a, G_b)) ``` -We start with different initial priors $\pi^i_0 \in (0, 1)$ and widen the gap between them. +We'll start with different initial priors $\pi^i_0 \in (0, 1)$ and widen the gap between them. 
```{code-cell} ipython3 # Different initial priors @@ -1128,7 +1144,7 @@ for i, (π_0_1, π_0_2) in enumerate(π_0_scenarios): s_seq_g, f, g, π_0_1, π_0_2, λ) ``` -Now let's visualize the results +Let's visualize the results ```{code-cell} ipython3 def plot_learning_results(results, π_0_scenarios, nature_type, truth_value): @@ -1180,7 +1196,7 @@ def plot_learning_results(results, π_0_scenarios, nature_type, truth_value): return fig, axes ``` -Now use the function to plot results when nature follows f: +Now we'll plot outcome when nature follows f: ```{code-cell} ipython3 fig_f, axes_f = plot_learning_results( @@ -1188,20 +1204,20 @@ fig_f, axes_f = plot_learning_results( plt.show() ``` -We can see that the agent with more "accurate" belief gets higher consumption share. +We can see that the agent with the more accurate belief gets higher consumption share. -Moreover, the further the initial beliefs are, the longer it takes for the consumption ratio to converge. +Moreover, the further apart are initial beliefs, the longer it takes for the consumption ratio to converge. -The time it takes for the "less accurate" agent costs their share in future consumption. +The longer it takes for the "less accurate" agent to learn, the lower its ultimate consumption share. -Now plot results when nature follows g: +Now let's plot outcomes when nature follows g: ```{code-cell} ipython3 fig_g, axes_g = plot_learning_results(results_g, π_0_scenarios, 'g', 0.0) plt.show() ``` -We observe a similar but symmetrical pattern. +We observe symmetrical outcomes. ```{solution-end} ``` @@ -1209,15 +1225,15 @@ We observe a similar but symmetrical pattern. ```{exercise} :label: lr_ex5 -In the previous exercise, we specifically set the two beta distributions to be relatively close to each other. +In the previous exercise, we purposefully set the two beta distributions to be relatively close to each other. -That is to say, it is harder to distinguish between the two distributions. +That made it challenging to distinguish the distributions. -Now let's explore an alternative scenario where the two distributions are further apart. +Now let's study outcomes when the distributions are further apart. -Specifically, we set $f \sim \text{Beta}(2, 5)$ and $g \sim \text{Beta}(5, 2)$. +Let's set $f \sim \text{Beta}(2, 5)$ and $g \sim \text{Beta}(5, 2)$. -Try to compare the learning dynamics in this scenario with the previous one using the simulation code we developed earlier. +Please use the Python code you have written to study outcomes. ``` ```{solution-start} lr_ex5 @@ -1269,10 +1285,12 @@ plt.show() fig_g, axes_g = plot_learning_results(results_g, π_0_scenarios, 'g', 0.0) plt.show() ``` +Evidently, because the two distributions are further apart, it is easier to distinguish them. + +So learning occurs more quickly. -In this case, it is easier to realize one's belief is incorrect; the belief adjusts more quickly. -Observe that consumption shares also adjust more quickly. +So do consumption shares. 
```{solution-end} ``` From 666442c691765c7d381c5cc735224d49b359f6b3 Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Thu, 21 Aug 2025 18:06:05 +1000 Subject: [PATCH 05/14] update new exciting exercises --- lectures/likelihood_ratio_process_2.md | 381 ++++++++++++++++++++++++- 1 file changed, 369 insertions(+), 12 deletions(-) diff --git a/lectures/likelihood_ratio_process_2.md b/lectures/likelihood_ratio_process_2.md index 7a5354500..c21ab638f 100644 --- a/lectures/likelihood_ratio_process_2.md +++ b/lectures/likelihood_ratio_process_2.md @@ -4,7 +4,7 @@ jupytext: extension: .md format_name: myst format_version: 0.13 - jupytext_version: 1.17.2 + jupytext_version: 1.16.6 kernelspec: display_name: Python 3 (ipykernel) language: python @@ -63,7 +63,7 @@ Let's start by importing some Python tools. ```{code-cell} ipython3 import matplotlib.pyplot as plt import numpy as np -from numba import vectorize, jit +from numba import vectorize, jit, prange from math import gamma from scipy.integrate import quad from scipy.optimize import brentq, minimize_scalar @@ -1112,7 +1112,7 @@ f = jit(lambda x: p(x, F_a, F_b)) g = jit(lambda x: p(x, G_a, G_b)) ``` -We'll start with different initial priors $\pi^i_0 \in (0, 1)$ and widen the gap between them. +We'll start with different initial priors $\pi^i_0 \in (0, 1)$ and widen the gap between them. ```{code-cell} ipython3 # Different initial priors @@ -1168,29 +1168,29 @@ def plot_learning_results(results, π_0_scenarios, nature_type, truth_value): ax = axes[row, 0] π_1_med = np.median(res['π_1'], axis=0) π_2_med = np.median(res['π_2'], axis=0) - ax.plot(π_1_med, 'C0', label=r'$\pi_1^t$ (agent 1)', linewidth=2) - ax.plot(π_2_med, 'C1', label=r'$\pi_2^t$ (agent 2)', linewidth=2) + ax.plot(π_1_med, 'C0', label=r'agent 1', linewidth=2) + ax.plot(π_2_med, 'C1', label=r'agent 2', linewidth=2) ax.axhline(y=truth_value, color='gray', linestyle='--', alpha=0.5, label=f'truth ({nature_type})') - ax.set_title(f'beliefs when nature = {nature_type}\n{scenario_label}') - ax.set_ylabel('belief probability') + ax.set_title(f'Beliefs when nature = {nature_type}\n{scenario_label}') + ax.set_ylabel(r'median $\pi_i^t$') ax.set_ylim([-0.05, 1.05]) ax.legend() # Plot consumption shares ax = axes[row, 1] c1_med = np.median(res['c1_share'], axis=0) - ax.plot(c1_med, 'g-', linewidth=2, label='agent 1 consumption share') + ax.plot(c1_med, 'g-', linewidth=2, label='median') ax.axhline(y=0.5, color='gray', linestyle='--', - alpha=0.5, label='equal split') - ax.set_title(f'consumption when nature = {nature_type}') - ax.set_ylabel('agent 1 share') + alpha=0.5) + ax.set_title(f'Agent 1 consumption share (Nature = {nature_type})') + ax.set_ylabel('consumption share') ax.set_ylim([0, 1]) ax.legend() # Add x-labels for col in range(2): - axes[row, col].set_xlabel('time') + axes[row, col].set_xlabel('$t$') plt.tight_layout() return fig, axes @@ -1285,6 +1285,7 @@ plt.show() fig_g, axes_g = plot_learning_results(results_g, π_0_scenarios, 'g', 0.0) plt.show() ``` + Evidently, because the two distributions are further apart, it is easier to distinguish them. So learning occurs more quickly. @@ -1294,3 +1295,359 @@ So do consumption shares. ```{solution-end} ``` + +```{exercise} +:label: lr_ex6 + +Two agents with different beliefs about three possible models. 
+ +Assume $f(x) \geq 0$, $g(x) \geq 0$, and $h(x) \geq 0$ for $x \in X$ with: +- $\int_X f(x) dx = 1$ +- $\int_X g(x) dx = 1$ +- $\int_X h(x) dx = 1$ + +We'll consider two agents: +* Agent 1: $\pi^g_0 = 1 - \pi^f_0$, $\pi^f_0 \in (0,1)$ (believes only in models $f$ and $g$) +* Agent 2: $\pi^g_0 = \pi^f_0 = 1/3$ (equally weights all three models) + +Set $h = \pi^f_0 f + (1-\pi^f_0) g$ (a mixture of $f$ and $g$). + +Simulate and visualize the evolution of consumption allocations when: +* Nature permanently draws from $f$ +* Nature permanently draws from $g$ + +Use the existing code structure to implement this simulation and observe how the allocation evolves over time. + +``` + +```{solution-start} lr_ex6 +:class: dropdown +``` + +Let's implement this three-model case with two agents having different beliefs. + +First, let's define $f$ and $g$ far apart, with $h$ being a mixture of $f$ and $g$ + +```{code-cell} ipython3 +F_a, F_b = 1, 1 +G_a, G_b = 3, 1.2 +λ = 0.5 +π_f_0 = 0.4 + +f = jit(lambda x: p(x, F_a, F_b)) +g = jit(lambda x: p(x, G_a, G_b)) +h = jit(lambda x: π_f_0 * f(x) + (1 - π_f_0) * g(x)) +``` + +Now we can define the belief updating for the three-agent model + +```{code-cell} ipython3 +@jit(parallel=True) +def compute_posterior_three_models( + s_seq, f_func, g_func, h_func, π_f_0, π_g_0): + """ + Compute posterior probabilities for three models. + """ + N, T = s_seq.shape + π_h_0 = 1 - π_f_0 - π_g_0 + + π_f = np.zeros((N, T)) + π_g = np.zeros((N, T)) + π_h = np.zeros((N, T)) + + for n in prange(N): + # Initialize with priors + π_f[n, 0] = π_f_0 + π_g[n, 0] = π_g_0 + π_h[n, 0] = π_h_0 + + # Compute cumulative likelihoods + f_cumul = 1.0 + g_cumul = 1.0 + h_cumul = 1.0 + + for t in range(1, T): + s_t = s_seq[n, t] + + # Update cumulative likelihoods + f_cumul *= f_func(s_t) + g_cumul *= g_func(s_t) + h_cumul *= h_func(s_t) + + # Compute posteriors using Bayes' rule + denominator = π_f_0 * f_cumul + π_g_0 * g_cumul + π_h_0 * h_cumul + + π_f[n, t] = π_f_0 * f_cumul / denominator + π_g[n, t] = π_g_0 * g_cumul / denominator + π_h[n, t] = π_h_0 * h_cumul / denominator + + return π_f, π_g, π_h +``` + +Let's also write the simulation code following the same idea as in the previous exercises + +```{code-cell} ipython3 +@jit(parallel=True) +def simulate_three_model_allocation(s_seq, f_func, g_func, h_func, + π_f_1, π_g_1, π_f_2, π_g_2, λ=0.5): + """ + Simulate two agents having beliefs over three models. 
+ """ + + N, T = s_seq.shape + + # Compute posteriors for both agents + π_f_1_seq, π_g_1_seq, π_h_1_seq = compute_posterior_three_models( + s_seq, f_func, g_func, h_func, π_f_1, π_g_1) + π_f_2_seq, π_g_2_seq, π_h_2_seq = compute_posterior_three_models( + s_seq, f_func, g_func, h_func, π_f_2, π_g_2) + + # Compute consumption shares + c1_share = np.zeros((N, T)) + + for n in prange(N): + l_agents_cumul = 1.0 # Initialize likelihood ratio between agents + + for t in range(T): + # Agent 1's mixture density + m1_t = (π_f_1_seq[n, t] * f_func(s_seq[n, t]) + + π_g_1_seq[n, t] * g_func(s_seq[n, t]) + + π_h_1_seq[n, t] * h_func(s_seq[n, t])) + + # Agent 2's mixture density + m2_t = (π_f_2_seq[n, t] * f_func(s_seq[n, t]) + + π_g_2_seq[n, t] * g_func(s_seq[n, t]) + + π_h_2_seq[n, t] * h_func(s_seq[n, t])) + + # Update likelihood ratio between agents + if t > 0: + l_agents_cumul *= (m1_t / m2_t) + + # Consumption share for agent 1 + c1_share[n, t] = λ * l_agents_cumul / (1 - λ + λ * l_agents_cumul) + + return c1_share, π_f_1_seq, π_g_1_seq, π_h_1_seq, π_f_2_seq, π_g_2_seq, π_h_2_seq +``` + +The following code cell defines a plotting function to show the convergence of beliefs and consumption ratio + +```{code-cell} ipython3 + +def plot_three_model_results(c1_data, π_data, nature_labels, λ=0.5, + agent_labels=None, title_suffix="", figsize=(12, 10)): + """ + Create plots for three-model exercises. + """ + n_scenarios = len(nature_labels) + fig, axes = plt.subplots(2, n_scenarios, figsize=figsize) + if n_scenarios == 1: + axes = axes.reshape(2, 1) + + colors = ['blue', 'green', 'orange'] # For different nature scenarios + + for i, (nature_label, c1, π_tuple) in enumerate( + zip(nature_labels, c1_data, π_data)): + πf1, πg1, πh1, πf2, πg2, πh2 = π_tuple + + ax = axes[i, 0] + ax.plot(np.median(πf1, axis=0), 'C0-', linewidth=2) + ax.plot(np.median(πg1, axis=0), 'C0--', linewidth=2) + ax.plot(np.median(πh1, axis=0), 'C0:', linewidth=2) + ax.plot(np.median(πf2, axis=0), 'C1-', linewidth=2) + ax.plot(np.median(πg2, axis=0), 'C1--', linewidth=2) + ax.plot(np.median(πh2, axis=0), 'C1:', linewidth=2) + + # Truth indicator + truth_val = 1.0 if nature_label == 'f' else ( + 1.0 if nature_label == 'g' else 0.0) + ax.axhline(y=truth_val, color='grey', linestyle='-.', alpha=0.7) + + ax.set_title(f'Beliefs when Nature = {nature_label}') + ax.set_xlabel('$t$') + ax.set_ylabel(r'median $\pi(\cdot)$') + ax.set_ylim([-0.01, 1.01]) + + if i == 0: + from matplotlib.lines import Line2D + + # Agent colors legend + agent_elements = [ + Line2D([0], [0], color='C0', linewidth=2, label='agent 1'), + Line2D([0], [0], color='C1', linewidth=2, label='agent 2') + ] + agent_legend = ax.legend(handles=agent_elements, loc='upper left') + + # Line styles legend + style_elements = [ + Line2D([0], [0], color='black', + linestyle='-', label='π(f)'), + Line2D([0], [0], color='black', + linestyle='--', label='π(g)'), + Line2D([0], [0], color='black', + linestyle=':', label='π(h)'), + Line2D([0], [0], color='grey', + linestyle='-.', alpha=0.7, label='truth') + ] + ax.legend(handles=style_elements, loc='upper right') + + ax.add_artist(agent_legend) + + ax = axes[i, 1] + c1_med = np.median(c1, axis=0) + ax.plot(c1_med, color=colors[i], linewidth=2, label="median") + ax.axhline(y=0.5, color='grey', linestyle='--', alpha=0.5) + ax.set_title( + f'Agent 1 consumption share (Nature = {nature_label}{title_suffix})') + ax.set_xlabel('t') + ax.set_ylabel("median consumption share") + ax.set_ylim([-0.01, 1.01]) + ax.legend() + + plt.tight_layout() + return 
fig, axes +``` + +Now let's run the simulation. + +In our simulation, agent 1 believes only in $f$ and $g$, while agent 2 has an equal weight on all three models + +```{code-cell} ipython3 +T = 100 +N = 1000 + +# Generate sequences for nature f and g +s_seq_f = np.random.beta(F_a, F_b, (N, T)) +s_seq_g = np.random.beta(G_a, G_b, (N, T)) + +results_f = simulate_three_model_allocation(s_seq_f, + f, g, h, π_f_0, 1-π_f_0, + 1/3, 1/3, λ) +results_g = simulate_three_model_allocation(s_seq_g, + f, g, h, π_f_0, 1-π_f_0, + 1/3, 1/3, λ) + +c1_data = [results_f[0], results_g[0]] +π_data = [results_f[1:], results_g[1:]] +nature_labels = ['f', 'g'] + +fig, axes = plot_three_model_results(c1_data, π_data, nature_labels, λ) +plt.show() +``` + +The results show interesting dynamics: + +In the top panel, Agent 1 (orange line) who initially puts weight only on $f$ (solid line) and $g$ (dashed line) eventually dominates consumption as they learn the truth faster than Agent 2 who spreads probability across all three models. + +When nature draws from $g$ (lower panel), we see a similar pattern but reversed -- Agent 1's consumption share decreases as their belief converges to the truth. + +For both cases, the belief on $h$ (dotted line) eventually goes to 0. + +The agent with the simpler (but correct) model structure learns faster and eventually dominates consumption allocation. + +```{solution-end} +``` + +```{exercise} +:label: lr_ex7 + +Two agents with extreme priors about three models. + +Consider the same setup as the previous exercise, but now: +* Agent 1: $\pi^g_0 = \pi^f_0 = \frac{\epsilon}{2} > 0$, where $\epsilon$ is close to $0$ (e.g., $\epsilon = 0.01$) +* Agent 2: $\pi^g_0 = \pi^f_0 = 0$ (dogmatic belief in model $h$) + +Choose $h$ to be close but not equal to either $f$ or $g$ as measured by KL divergence. For example, set $h \sim \text{Beta}(1.2, 1.1)$. + +Simulate and visualize the evolution of consumption allocations when: +* Nature permanently draws from $f$ +* Nature permanently draws from $g$ + +Observe how the presence of extreme priors affects learning and allocation dynamics. + +``` + +```{solution-start} lr_ex7 +:class: dropdown +``` + +Let's implement this case with extreme priors where one agent is almost dogmatic. + +For this to converge, we need a longer sequence by increasing $T$ to 1000. 
+ +Let's define the parameters for distributions and verify that $h$ and $f$ are closer than $h$ and $g$ + +```{code-cell} ipython3 +F_a, F_b = 1, 1 +G_a, G_b = 3, 1.2 +H_a, H_b = 1.2, 1.1 + +f = jit(lambda x: p(x, F_a, F_b)) +g = jit(lambda x: p(x, G_a, G_b)) +h = jit(lambda x: p(x, H_a, H_b)) + +Kh_f = compute_KL(h, f) +Kh_g = compute_KL(h, g) +Kf_h = compute_KL(f, h) +Kg_h = compute_KL(g, h) + +print(f"KL divergences:") +print(f"KL(h,f) = {Kh_f:.4f}, KL(h,g) = {Kh_g:.4f}") +print(f"KL(f,h) = {Kf_h:.4f}, KL(g,h) = {Kg_h:.4f}") +``` + +Now we can set the belief models for the two agents + +```{code-cell} ipython3 +# Set extreme priors +ε = 0.01 +λ = 0.5 + +# Agent 1: π_f = ε/2, π_g = ε/2, π_h = 1-ε +# (almost dogmatic about h) +π_f_1 = ε/2 +π_g_1 = ε/2 + +# Agent 2: π_f = 0, π_g = 0, π_h = 1 +# (fully dogmatic about h) +π_f_2 = 1e-10 +π_g_2 = 1e-10 +``` + +Now we can run the simulation + +```{code-cell} ipython3 +T = 1000 +N = 1000 + +# Generate sequences for different nature scenarios +s_seq_f = np.random.beta(F_a, F_b, (N, T)) +s_seq_g = np.random.beta(G_a, G_b, (N, T)) + +# Run simulations for both scenarios +results_f = simulate_three_model_allocation( + s_seq_f, + f, g, h, + π_f_1, π_g_1, π_f_2, π_g_2, λ) +results_g = simulate_three_model_allocation( + s_seq_g, + f, g, h, + π_f_1, π_g_1, π_f_2, π_g_2, λ) + +c1_data = [results_f[0], results_g[0]] +π_data = [results_f[1:], results_g[1:]] +nature_labels = ['f', 'g'] + +title_suffix = f" (Agent 1: ε={ε}, Agent 2: dogmatic)" +fig, axes = plot_three_model_results(c1_data, π_data, nature_labels, λ, + title_suffix=title_suffix) +plt.show() +``` + +In the top panel, observe how slowly agent 1 is adjusting to the truth -- the belief is rigid but still updating. + +However, since agent 2 is dogmatic about $h$, and $f$ is very hard to distinguish from $g$ as measured by $KL(f, g)$, we can see that the belief is almost standing still. + +In the bottom panel, since $g$ is further away from $h$, both agents adjust toward the truth very quickly, but agent 1 acts faster given the slightly higher weight on $f$ and $g$. + +```{solution-end} +``` From e51ac00887f35d955d350102ca2a2977a0fb1387 Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Thu, 21 Aug 2025 22:52:45 +1000 Subject: [PATCH 06/14] minor updates --- lectures/likelihood_ratio_process_2.md | 31 +++++++++++++++++--------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/lectures/likelihood_ratio_process_2.md b/lectures/likelihood_ratio_process_2.md index c21ab638f..9f1031ab0 100644 --- a/lectures/likelihood_ratio_process_2.md +++ b/lectures/likelihood_ratio_process_2.md @@ -1299,7 +1299,7 @@ So do consumption shares. ```{exercise} :label: lr_ex6 -Two agents with different beliefs about three possible models. +Two agents have different beliefs about three possible models. Assume $f(x) \geq 0$, $g(x) \geq 0$, and $h(x) \geq 0$ for $x \in X$ with: - $\int_X f(x) dx = 1$ @@ -1307,10 +1307,14 @@ Assume $f(x) \geq 0$, $g(x) \geq 0$, and $h(x) \geq 0$ for $x \in X$ with: - $\int_X h(x) dx = 1$ We'll consider two agents: -* Agent 1: $\pi^g_0 = 1 - \pi^f_0$, $\pi^f_0 \in (0,1)$ (believes only in models $f$ and $g$) -* Agent 2: $\pi^g_0 = \pi^f_0 = 1/3$ (equally weights all three models) +* Agent 1: $\pi^g_0 = 1 - \pi^f_0$, $\pi^f_0 \in (0,1), \pi^h_0 = 0$ +(believes only in models $f$ and $g$) +* Agent 2: $\pi^g_0 = \pi^f_0 = 1/3$, $\pi^h_0 = 1/3$ +(equally weights all three models) -Set $h = \pi^f_0 f + (1-\pi^f_0) g$ (a mixture of $f$ and $g$). 
+Let $f$ and $g$ be two beta distributions with $f \sim \text{Beta}(1, 1)$ and +$g \sim \text{Beta}(3, 1.2)$, and +set $h = \pi^f_0 f + (1-\pi^f_0) g$ (a mixture of $f$ and $g$). Simulate and visualize the evolution of consumption allocations when: * Nature permanently draws from $f$ @@ -1443,7 +1447,7 @@ def plot_three_model_results(c1_data, π_data, nature_labels, λ=0.5, if n_scenarios == 1: axes = axes.reshape(2, 1) - colors = ['blue', 'green', 'orange'] # For different nature scenarios + colors = ['blue', 'green', 'orange'] for i, (nature_label, c1, π_tuple) in enumerate( zip(nature_labels, c1_data, π_data)): @@ -1534,7 +1538,7 @@ fig, axes = plot_three_model_results(c1_data, π_data, nature_labels, λ) plt.show() ``` -The results show interesting dynamics: +The results show interesting dynamics. In the top panel, Agent 1 (orange line) who initially puts weight only on $f$ (solid line) and $g$ (dashed line) eventually dominates consumption as they learn the truth faster than Agent 2 who spreads probability across all three models. @@ -1544,6 +1548,8 @@ For both cases, the belief on $h$ (dotted line) eventually goes to 0. The agent with the simpler (but correct) model structure learns faster and eventually dominates consumption allocation. +In other words, the model penalizes complexity and rewards accuracy. + ```{solution-end} ``` @@ -1554,9 +1560,10 @@ Two agents with extreme priors about three models. Consider the same setup as the previous exercise, but now: * Agent 1: $\pi^g_0 = \pi^f_0 = \frac{\epsilon}{2} > 0$, where $\epsilon$ is close to $0$ (e.g., $\epsilon = 0.01$) -* Agent 2: $\pi^g_0 = \pi^f_0 = 0$ (dogmatic belief in model $h$) +* Agent 2: $\pi^g_0 = \pi^f_0 = 0$ (rigid belief in model $h$) -Choose $h$ to be close but not equal to either $f$ or $g$ as measured by KL divergence. For example, set $h \sim \text{Beta}(1.2, 1.1)$. +Choose $h$ to be close but not equal to either $f$ or $g$ as measured by KL divergence. +For example, set $h \sim \text{Beta}(1.2, 1.1)$. Simulate and visualize the evolution of consumption allocations when: * Nature permanently draws from $f$ @@ -1603,12 +1610,12 @@ Now we can set the belief models for the two agents λ = 0.5 # Agent 1: π_f = ε/2, π_g = ε/2, π_h = 1-ε -# (almost dogmatic about h) +# (almost rigid about h) π_f_1 = ε/2 π_g_1 = ε/2 # Agent 2: π_f = 0, π_g = 0, π_h = 1 -# (fully dogmatic about h) +# (fully rigid about h) π_f_2 = 1e-10 π_g_2 = 1e-10 ``` @@ -1645,7 +1652,9 @@ plt.show() In the top panel, observe how slowly agent 1 is adjusting to the truth -- the belief is rigid but still updating. -However, since agent 2 is dogmatic about $h$, and $f$ is very hard to distinguish from $g$ as measured by $KL(f, g)$, we can see that the belief is almost standing still. +The belief about $h$ slowly shifts towards 0 crossing the belief about $f$ moving up to 1 at $t = 500$. + +However, since agent 2 is rigid about $h$, and $f$ is very difficult to distinguish from $h$ as measured by $KL(f, h)$, we can see that the belief is almost stationary due to the difficulty of realizing the belief is incorrect. In the bottom panel, since $g$ is further away from $h$, both agents adjust toward the truth very quickly, but agent 1 acts faster given the slightly higher weight on $f$ and $g$. 
From eb5dc78cdf377140ad53709c733d42e172732691 Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Thu, 21 Aug 2025 23:15:50 +1000 Subject: [PATCH 07/14] minor updates --- lectures/likelihood_ratio_process_2.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/lectures/likelihood_ratio_process_2.md b/lectures/likelihood_ratio_process_2.md index 9f1031ab0..a3ca81c14 100644 --- a/lectures/likelihood_ratio_process_2.md +++ b/lectures/likelihood_ratio_process_2.md @@ -1436,9 +1436,10 @@ def simulate_three_model_allocation(s_seq, f_func, g_func, h_func, The following code cell defines a plotting function to show the convergence of beliefs and consumption ratio ```{code-cell} ipython3 +:tags: [hide-input] def plot_three_model_results(c1_data, π_data, nature_labels, λ=0.5, - agent_labels=None, title_suffix="", figsize=(12, 10)): + agent_labels=None, figsize=(12, 10)): """ Create plots for three-model exercises. """ @@ -1501,7 +1502,7 @@ def plot_three_model_results(c1_data, π_data, nature_labels, λ=0.5, ax.plot(c1_med, color=colors[i], linewidth=2, label="median") ax.axhline(y=0.5, color='grey', linestyle='--', alpha=0.5) ax.set_title( - f'Agent 1 consumption share (Nature = {nature_label}{title_suffix})') + f'Agent 1 consumption share (Nature = {nature_label})') ax.set_xlabel('t') ax.set_ylabel("median consumption share") ax.set_ylim([-0.01, 1.01]) @@ -1644,9 +1645,7 @@ c1_data = [results_f[0], results_g[0]] π_data = [results_f[1:], results_g[1:]] nature_labels = ['f', 'g'] -title_suffix = f" (Agent 1: ε={ε}, Agent 2: dogmatic)" -fig, axes = plot_three_model_results(c1_data, π_data, nature_labels, λ, - title_suffix=title_suffix) +fig, axes = plot_three_model_results(c1_data, π_data, nature_labels, λ) plt.show() ``` From 08d6c24bbea0f9af42274d7243dbfd9cf808652b Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Thu, 21 Aug 2025 23:19:05 +1000 Subject: [PATCH 08/14] updates --- lectures/likelihood_ratio_process_2.md | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/lectures/likelihood_ratio_process_2.md b/lectures/likelihood_ratio_process_2.md index a3ca81c14..024c7f4eb 100644 --- a/lectures/likelihood_ratio_process_2.md +++ b/lectures/likelihood_ratio_process_2.md @@ -1316,6 +1316,21 @@ Let $f$ and $g$ be two beta distributions with $f \sim \text{Beta}(1, 1)$ and $g \sim \text{Beta}(3, 1.2)$, and set $h = \pi^f_0 f + (1-\pi^f_0) g$ (a mixture of $f$ and $g$). +Bayes' Law tells us that posterior probabilities on models $f$ and $g$ evolve according to + +$$ +\pi^f(s^t) := \frac{\pi^f_0 f(s^t)}{\pi^f_0 f(s^t) ++ \pi^g(s^t) g(s^t) + (1 - \pi^f_0 - \pi^g_0) h(s^t)} +$$ + +and + +$$ +\pi^g(s^t) := \frac{\pi^g_0 g(s^t)}{\pi^f_0 f(s^t) ++ \pi^g(s^t) g(s^t) + (1 - \pi^f_0 - \pi^g_0) h(s^t)} +$$ + + Simulate and visualize the evolution of consumption allocations when: * Nature permanently draws from $f$ * Nature permanently draws from $g$ @@ -1557,7 +1572,7 @@ In other words, the model penalizes complexity and rewards accuracy. ```{exercise} :label: lr_ex7 -Two agents with extreme priors about three models. +Now consider two agents with extreme priors about three models. 
Consider the same setup as the previous exercise, but now: * Agent 1: $\pi^g_0 = \pi^f_0 = \frac{\epsilon}{2} > 0$, where $\epsilon$ is close to $0$ (e.g., $\epsilon = 0.01$) From a5ada585456c7fa4204c04463b1f69694d9d5e00 Mon Sep 17 00:00:00 2001 From: thomassargent30 Date: Thu, 21 Aug 2025 13:53:51 -0400 Subject: [PATCH 09/14] Tom's August 21 edits of Humphrey's two new exercises --- lectures/likelihood_ratio_process_2.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/lectures/likelihood_ratio_process_2.md b/lectures/likelihood_ratio_process_2.md index 024c7f4eb..ce75b75ed 100644 --- a/lectures/likelihood_ratio_process_2.md +++ b/lectures/likelihood_ratio_process_2.md @@ -1308,13 +1308,13 @@ Assume $f(x) \geq 0$, $g(x) \geq 0$, and $h(x) \geq 0$ for $x \in X$ with: We'll consider two agents: * Agent 1: $\pi^g_0 = 1 - \pi^f_0$, $\pi^f_0 \in (0,1), \pi^h_0 = 0$ -(believes only in models $f$ and $g$) +(attaches positive probability only to models $f$ and $g$) * Agent 2: $\pi^g_0 = \pi^f_0 = 1/3$, $\pi^h_0 = 1/3$ -(equally weights all three models) +(attaches equal weights to all three models) Let $f$ and $g$ be two beta distributions with $f \sim \text{Beta}(1, 1)$ and $g \sim \text{Beta}(3, 1.2)$, and -set $h = \pi^f_0 f + (1-\pi^f_0) g$ (a mixture of $f$ and $g$). +set $h = \pi^f_0 f + (1-\pi^f_0) g$. Bayes' Law tells us that posterior probabilities on models $f$ and $g$ evolve according to @@ -1448,7 +1448,7 @@ def simulate_three_model_allocation(s_seq, f_func, g_func, h_func, return c1_share, π_f_1_seq, π_g_1_seq, π_h_1_seq, π_f_2_seq, π_g_2_seq, π_h_2_seq ``` -The following code cell defines a plotting function to show the convergence of beliefs and consumption ratio +The following code cell defines a plotting function to show evolutions of beliefs and consumption ratios ```{code-cell} ipython3 :tags: [hide-input] @@ -1529,7 +1529,7 @@ def plot_three_model_results(c1_data, π_data, nature_labels, λ=0.5, Now let's run the simulation. -In our simulation, agent 1 believes only in $f$ and $g$, while agent 2 has an equal weight on all three models +In our simulation, agent 1 assigns positive probabilities only to $f$ and $g$, while agent 2 puts equal weights on all three models ```{code-cell} ipython3 T = 100 From 623db7cbbbcf5b49d7d071bed7be0ecdcc6f1de8 Mon Sep 17 00:00:00 2001 From: thomassargent30 Date: Thu, 21 Aug 2025 18:32:50 -0400 Subject: [PATCH 10/14] Tom's second Aug 21 edits of the BE lecture --- lectures/likelihood_ratio_process_2.md | 60 ++++++++++++++++++-------- 1 file changed, 41 insertions(+), 19 deletions(-) diff --git a/lectures/likelihood_ratio_process_2.md b/lectures/likelihood_ratio_process_2.md index ce75b75ed..58b6b0f00 100644 --- a/lectures/likelihood_ratio_process_2.md +++ b/lectures/likelihood_ratio_process_2.md @@ -1331,11 +1331,12 @@ $$ $$ -Simulate and visualize the evolution of consumption allocations when: +Please simulate and visualize evolutions of posterior probabilities and consumption allocations when: + * Nature permanently draws from $f$ * Nature permanently draws from $g$ -Use the existing code structure to implement this simulation and observe how the allocation evolves over time. 
+ ``` @@ -1403,7 +1404,7 @@ def compute_posterior_three_models( return π_f, π_g, π_h ``` -Let's also write the simulation code following the same idea as in the previous exercises +Let's also write simulation code along lines similar to earlier exercises ```{code-cell} ipython3 @jit(parallel=True) @@ -1529,7 +1530,7 @@ def plot_three_model_results(c1_data, π_data, nature_labels, λ=0.5, Now let's run the simulation. -In our simulation, agent 1 assigns positive probabilities only to $f$ and $g$, while agent 2 puts equal weights on all three models +In the simulation below, agent 1 assigns positive probabilities only to $f$ and $g$, while agent 2 puts equal weights on all three models. ```{code-cell} ipython3 T = 100 @@ -1554,17 +1555,30 @@ fig, axes = plot_three_model_results(c1_data, π_data, nature_labels, λ) plt.show() ``` -The results show interesting dynamics. +Agent 1's posterior probabilities are depicted with orange lines and agent 2's posterior beliefs are depicted with blue lines. + +The top panel shows outcomes when nature draws from $f$. + +Evidently, when nature draws from $f$, agent 1 learns faster than agent 2, who, unlike agent 1, attaches a positive prior probability to model $h$. + +The bottom panel depicts outcomes when nature draws from $g$. + +Again, agent 1 learns faster than agent 2, who, unlike agent 1, attaches some prior probability to model $h$. + + * In both panels, agent 2's posterior probability attached to $h$ (dotted line) converges to 0. + +Notice that when nature uses model $f$, the consumption share of agent 1 is only temporarily bigger than 1, when when nature uses model $g$, agent 1's consumption share is permanently higher. -In the top panel, Agent 1 (orange line) who initially puts weight only on $f$ (solid line) and $g$ (dashed line) eventually dominates consumption as they learn the truth faster than Agent 2 who spreads probability across all three models. -When nature draws from $g$ (lower panel), we see a similar pattern but reversed -- Agent 1's consumption share decreases as their belief converges to the truth. +In this exercise, the "truth" is among possible outcomes according to both agents. -For both cases, the belief on $h$ (dotted line) eventually goes to 0. +Agent 2's model is "more general" because it allows a possibility -- that nature is drawing from $h$ -- that agent 1's model does not include. -The agent with the simpler (but correct) model structure learns faster and eventually dominates consumption allocation. +Agent 1 learns more quickly because he uses a simpler model. -In other words, the model penalizes complexity and rewards accuracy. +It would be interesting to explore why agent 1's consumption allocation when $f$ generates the data is only temporarily higher than agent 2's, while when $g$ generates the data, it is permanently higher. + + * Hint: Somehow the KL divergence should be able to help us sort this out. ```{solution-end} ``` @@ -1581,11 +1595,12 @@ Consider the same setup as the previous exercise, but now: Choose $h$ to be close but not equal to either $f$ or $g$ as measured by KL divergence. For example, set $h \sim \text{Beta}(1.2, 1.1)$. -Simulate and visualize the evolution of consumption allocations when: +Please simulate and visualize evolutions of posterior probabilities and consumption allocations when: + * Nature permanently draws from $f$ * Nature permanently draws from $g$ -Observe how the presence of extreme priors affects learning and allocation dynamics. 
+ ``` @@ -1593,11 +1608,10 @@ Observe how the presence of extreme priors affects learning and allocation dynam :class: dropdown ``` -Let's implement this case with extreme priors where one agent is almost dogmatic. -For this to converge, we need a longer sequence by increasing $T$ to 1000. +To explore this exercise, we increase $T$ to 1000. -Let's define the parameters for distributions and verify that $h$ and $f$ are closer than $h$ and $g$ +Let's specify $f, g$, and $h$ and verify that $h$ and $f$ are closer than $h$ and $g$ ```{code-cell} ipython3 F_a, F_b = 1, 1 @@ -1664,13 +1678,21 @@ fig, axes = plot_three_model_results(c1_data, π_data, nature_labels, λ) plt.show() ``` -In the top panel, observe how slowly agent 1 is adjusting to the truth -- the belief is rigid but still updating. +In the top panel, which depicts outcomes when nature draws from $f$, please observe how slowly agent 1 learns the truth. + +The posterior probability that agent 2 puts on $h$ converges to zero slowly. + + +This is because we have specified that $f$ is very difficult to distinguish from $h$ as measured by $KL(f, h)$. + +The bottom panel shows outcomes when nature draws from $g$. -The belief about $h$ slowly shifts towards 0 crossing the belief about $f$ moving up to 1 at $t = 500$. +We have specified things so that $g$ is further away from $h$ as measured by the KL divergence. -However, since agent 2 is rigid about $h$, and $f$ is very difficult to distinguish from $h$ as measured by $KL(f, h)$, we can see that the belief is almost stationary due to the difficulty of realizing the belief is incorrect. +This helps agent 2 learn the truth more quickly. -In the bottom panel, since $g$ is further away from $h$, both agents adjust toward the truth very quickly, but agent 1 acts faster given the slightly higher weight on $f$ and $g$. +Notice that agent 1's consumption share converges to 1 both when nature permanently draws from $f$ +and when nature permanently draws from $g$. ```{solution-end} ``` From bd3b00c02289c90bd0ea469467bad65c2354eb30 Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Fri, 22 Aug 2025 10:38:22 +1000 Subject: [PATCH 11/14] updates --- lectures/likelihood_ratio_process_2.md | 203 +++++++++++++++---------- 1 file changed, 119 insertions(+), 84 deletions(-) diff --git a/lectures/likelihood_ratio_process_2.md b/lectures/likelihood_ratio_process_2.md index 58b6b0f00..87e6e2198 100644 --- a/lectures/likelihood_ratio_process_2.md +++ b/lectures/likelihood_ratio_process_2.md @@ -4,7 +4,7 @@ jupytext: extension: .md format_name: myst format_version: 0.13 - jupytext_version: 1.16.6 + jupytext_version: 1.17.1 kernelspec: display_name: Python 3 (ipykernel) language: python @@ -1320,14 +1320,14 @@ Bayes' Law tells us that posterior probabilities on models $f$ and $g$ evolve ac $$ \pi^f(s^t) := \frac{\pi^f_0 f(s^t)}{\pi^f_0 f(s^t) -+ \pi^g(s^t) g(s^t) + (1 - \pi^f_0 - \pi^g_0) h(s^t)} ++ \pi^g_0 g(s^t) + (1 - \pi^f_0 - \pi^g_0) h(s^t)} $$ and $$ \pi^g(s^t) := \frac{\pi^g_0 g(s^t)}{\pi^f_0 f(s^t) -+ \pi^g(s^t) g(s^t) + (1 - \pi^f_0 - \pi^g_0) h(s^t)} ++ \pi^g_0 g(s^t) + (1 - \pi^f_0 - \pi^g_0) h(s^t)} $$ @@ -1346,7 +1346,7 @@ Please simulate and visualize evolutions of posterior probabilities and consum Let's implement this three-model case with two agents having different beliefs. -First, let's define $f$ and $g$ far apart, with $h$ being a mixture of $f$ and $g$ +First, let's define $f$ and $g$ far apart, with $h$ being a mixture of $f$ and $g$. 
```{code-cell} ipython3 F_a, F_b = 1, 1 @@ -1359,7 +1359,7 @@ g = jit(lambda x: p(x, G_a, G_b)) h = jit(lambda x: π_f_0 * f(x) + (1 - π_f_0) * g(x)) ``` -Now we can define the belief updating for the three-agent model +Now we can define the belief updating for the model ```{code-cell} ipython3 @jit(parallel=True) @@ -1404,7 +1404,7 @@ def compute_posterior_three_models( return π_f, π_g, π_h ``` -Let's also write simulation code along lines similar to earlier exercises +Let's also write simulation code along the lines of earlier exercises ```{code-cell} ipython3 @jit(parallel=True) @@ -1454,75 +1454,83 @@ The following code cell defines a plotting function to show evolutions of belief ```{code-cell} ipython3 :tags: [hide-input] -def plot_three_model_results(c1_data, π_data, nature_labels, λ=0.5, - agent_labels=None, figsize=(12, 10)): +def plot_belief_evolution(π_data, nature_labels, figsize=(15, 5)): """ - Create plots for three-model exercises. + Create plots showing belief evolution for three models (f, g, h) for both agents. + Each row corresponds to a different nature scenario. """ n_scenarios = len(nature_labels) - fig, axes = plt.subplots(2, n_scenarios, figsize=figsize) + fig, axes = plt.subplots(n_scenarios, 3, figsize=figsize) if n_scenarios == 1: - axes = axes.reshape(2, 1) + axes = axes.reshape(1, 3) - colors = ['blue', 'green', 'orange'] + model_names = ['f', 'g', 'h'] - for i, (nature_label, c1, π_tuple) in enumerate( - zip(nature_labels, c1_data, π_data)): + for i, (nature_label, π_tuple) in enumerate(zip(nature_labels, π_data)): πf1, πg1, πh1, πf2, πg2, πh2 = π_tuple - - ax = axes[i, 0] - ax.plot(np.median(πf1, axis=0), 'C0-', linewidth=2) - ax.plot(np.median(πg1, axis=0), 'C0--', linewidth=2) - ax.plot(np.median(πh1, axis=0), 'C0:', linewidth=2) - ax.plot(np.median(πf2, axis=0), 'C1-', linewidth=2) - ax.plot(np.median(πg2, axis=0), 'C1--', linewidth=2) - ax.plot(np.median(πh2, axis=0), 'C1:', linewidth=2) - - # Truth indicator - truth_val = 1.0 if nature_label == 'f' else ( - 1.0 if nature_label == 'g' else 0.0) - ax.axhline(y=truth_val, color='grey', linestyle='-.', alpha=0.7) + π_data_models = [(πf1, πf2), (πg1, πg2), (πh1, πh2)] - ax.set_title(f'Beliefs when Nature = {nature_label}') - ax.set_xlabel('$t$') - ax.set_ylabel(r'median $\pi(\cdot)$') - ax.set_ylim([-0.01, 1.01]) - - if i == 0: - from matplotlib.lines import Line2D - - # Agent colors legend - agent_elements = [ - Line2D([0], [0], color='C0', linewidth=2, label='agent 1'), - Line2D([0], [0], color='C1', linewidth=2, label='agent 2') - ] - agent_legend = ax.legend(handles=agent_elements, loc='upper left') + for j, (model_name, (π1, π2)) in enumerate(zip(model_names, π_data_models)): + ax = axes[i, j] - # Line styles legend - style_elements = [ - Line2D([0], [0], color='black', - linestyle='-', label='π(f)'), - Line2D([0], [0], color='black', - linestyle='--', label='π(g)'), - Line2D([0], [0], color='black', - linestyle=':', label='π(h)'), - Line2D([0], [0], color='grey', - linestyle='-.', alpha=0.7, label='truth') - ] - ax.legend(handles=style_elements, loc='upper right') + # Plot agent beliefs + ax.plot(np.median(π1, axis=0), 'C0-', linewidth=2, label='agent 1') + ax.plot(np.median(π2, axis=0), 'C1-', linewidth=2, label='agent 2') - ax.add_artist(agent_legend) + # Truth indicator + if nature_label == model_name: + ax.axhline(y=1.0, color='grey', linestyle='-.', + alpha=0.7, label='truth') + else: + ax.axhline(y=0.0, color='grey', linestyle='-.', + alpha=0.7, label='truth') - ax = axes[i, 1] + 
ax.set_title(f'π({model_name}) when Nature = {nature_label}') + ax.set_xlabel('$t$') + ax.set_ylabel(f'median π({model_name})') + ax.set_ylim([-0.01, 1.01]) + ax.legend(loc='best') + + plt.tight_layout() + return fig, axes + + +def plot_consumption_dynamics(c1_data, nature_labels, λ=0.5, figsize=(12, 4)): + """ + Create plots showing consumption share dynamics for agent 1. + """ + n_scenarios = len(nature_labels) + fig, axes = plt.subplots(1, n_scenarios, figsize=figsize) + if n_scenarios == 1: + axes = [axes] + + colors = ['blue', 'green', 'orange'] + + for i, (nature_label, c1) in enumerate(zip(nature_labels, c1_data)): + ax = axes[i] c1_med = np.median(c1, axis=0) - ax.plot(c1_med, color=colors[i], linewidth=2, label="median") - ax.axhline(y=0.5, color='grey', linestyle='--', alpha=0.5) - ax.set_title( - f'Agent 1 consumption share (Nature = {nature_label})') - ax.set_xlabel('t') - ax.set_ylabel("median consumption share") + + # Plot median and percentiles + ax.plot(c1_med, color=colors[i % len(colors)], + linewidth=2, label="median") + + # Add percentile bands + c1_25 = np.percentile(c1, 25, axis=0) + c1_75 = np.percentile(c1, 75, axis=0) + ax.fill_between(range(len(c1_med)), c1_25, c1_75, + color=colors[i % len(colors)], alpha=0.2, + label="25-75 percentile") + + ax.axhline(y=0.5, color='grey', linestyle='--', + alpha=0.5, label='equal share') + ax.axhline(y=λ, color='red', linestyle=':', + alpha=0.5, label=f'initial share (λ={λ})') + + ax.set_title(f'Agent 1 consumption share (Nature = {nature_label})') + ax.set_xlabel('$t$') + ax.set_ylabel("consumption share") ax.set_ylim([-0.01, 1.01]) - ax.legend() + ax.legend(loc='best') plt.tight_layout() return fig, axes @@ -1530,7 +1538,7 @@ def plot_three_model_results(c1_data, π_data, nature_labels, λ=0.5, Now let's run the simulation. -In the simulation below, agent 1 assigns positive probabilities only to $f$ and $g$, while agent 2 puts equal weights on all three models. +In the simulation below, agent 1 assigns positive probabilities only to $f$ and $g$, while agent 2 puts equal weights on all three models. ```{code-cell} ipython3 T = 100 @@ -1551,24 +1559,51 @@ c1_data = [results_f[0], results_g[0]] π_data = [results_f[1:], results_g[1:]] nature_labels = ['f', 'g'] -fig, axes = plot_three_model_results(c1_data, π_data, nature_labels, λ) -plt.show() +plot_belief_evolution(π_data, nature_labels, figsize=(15, 5*len(nature_labels))) +plt.plot(); ``` -Agent 1's posterior probabilities are depicted with orange lines and agent 2's posterior beliefs are depicted with blue lines. +These plots show the evolution of beliefs for each model (f, g, h) separately. + +Agent 1's posterior probabilities are depicted in blue and agent 2's posterior beliefs are depicted in orange. -The top panel shows outcomes when nature draws from $f$. +The top panel shows outcomes when nature draws from $f$. + +Evidently, when nature draws from $f$, agent 1 learns faster than agent 2, who, unlike agent 1, attaches a positive prior probability to model $h$: + +- In the leftmost panel, both agents' beliefs for $\pi(f)$ converge toward 1 (the truth) +- Agent 1 learns faster than agent 2, who initially assigns probability to model $h$ +- Agent 2's belief in model $h$ (rightmost panel) gradually converges to 0 -Evidently, when nature draws from $f$, agent 1 learns faster than agent 2, who, unlike agent 1, attaches a positive prior probability to model $h$. The bottom panel depicts outcomes when nature draws from $g$. 
-Again, agent 1 learns faster than agent 2, who, unlike agent 1, attaches some prior probability to model $h$. +Again, agent 1 learns faster than agent 2, who, unlike agent 1, attaches some prior probability to model $h$: + +- In the middle panel, both agents' beliefs for $\pi(g)$ converge toward 1 (the truth) +- Again, agent 1 learns faster due to not considering model $h$ initially +- Agent 2's belief in model $h$ converges to 0 over time - * In both panels, agent 2's posterior probability attached to $h$ (dotted line) converges to 0. +In both panels, agent 2's posterior probability attached to $h$ (dotted line) converges to 0. -Notice that when nature uses model $f$, the consumption share of agent 1 is only temporarily bigger than 1, when when nature uses model $g$, agent 1's consumption share is permanently higher. +Note the difference in the convergence speed when nature draws from $f$ and $g$. + +The time it takes for agent 2 to "catch up" is longer when nature draws from $g$. + +Agent 1 converges faster because it only needs to update beliefs between two models ($f$ and $g$), while agent 2 must also rule out model $h$. + + +Before reading the next figure, please guess how consumption shares evolve. + +Remember that agent 1 reaches the correct model faster than agent 2. + +```{code-cell} ipython3 +plot_consumption_dynamics(c1_data, nature_labels, λ=0.5, figsize=(12, 6)) +plt.show() +``` + +This plot shows the consumption share dynamics. Notice that when nature uses model $f$, the consumption share of agent 1 is only temporarily higher than 0.5, while when nature uses model $g$, agent 1's consumption share is permanently higher. In this exercise, the "truth" is among possible outcomes according to both agents. @@ -1599,9 +1634,6 @@ Please simulate and visualize evolutions of posterior probabilities and consum * Nature permanently draws from $f$ * Nature permanently draws from $g$ - - - ``` ```{solution-start} lr_ex7 @@ -1674,25 +1706,28 @@ c1_data = [results_f[0], results_g[0]] π_data = [results_f[1:], results_g[1:]] nature_labels = ['f', 'g'] -fig, axes = plot_three_model_results(c1_data, π_data, nature_labels, λ) -plt.show() +plot_belief_evolution(π_data, nature_labels, figsize=(15, 5*len(nature_labels))) +plt.plot(); ``` -In the top panel, which depicts outcomes when nature draws from $f$, please observe how slowly agent 1 learns the truth. +When nature draws from $f$ (top row), observe how slowly agent 1 learns the truth in the leftmost panel showing $\pi(f)$. -The posterior probability that agent 2 puts on $h$ converges to zero slowly. +The posterior probability that agent 1 puts on $h$ (rightmost panel) converges to zero slowly. +This is because we have specified that $f$ is very difficult to distinguish from $h$ as measured by $KL(f, h)$. -This is because we have specified that $f$ is very difficult to distinguish from $h$ as measured by $KL(f, h)$. +When it comes to agent 2, the belief remains stationary at 0 and does not converge to the true model because of its rigidity regarding $h$, and $f$ is very difficult to distinguish from $h$. -The bottom panel shows outcomes when nature draws from $g$. +When nature draws from $g$ (bottom row), we have specified things so that $g$ is further away from $h$ as measured by the KL divergence. -We have specified things so that $g$ is further away from $h$ as measured by the KL divergence. +This helps both agents learn the truth more quickly, as seen in the middle panel showing $\pi(g)$. -This helps agent 2 learn the truth more quickly. 
+```{code-cell} ipython3 +plot_consumption_dynamics(c1_data, nature_labels, λ=0.5, figsize=(12, 6)) +plt.show() +``` -Notice that agent 1's consumption share converges to 1 both when nature permanently draws from $f$ -and when nature permanently draws from $g$. +In the consumption dynamics plot, notice that agent 1's consumption share converges to 1 both when nature permanently draws from $f$ and when nature permanently draws from $g$. ```{solution-end} ``` From 2dc53c3960609f1c19dc86fe72fe9931a49c013d Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Sat, 23 Aug 2025 01:06:46 +1000 Subject: [PATCH 12/14] update parameters --- lectures/likelihood_ratio_process_2.md | 275 ++++++++++++++----------- 1 file changed, 159 insertions(+), 116 deletions(-) diff --git a/lectures/likelihood_ratio_process_2.md b/lectures/likelihood_ratio_process_2.md index 87e6e2198..a0f2668e8 100644 --- a/lectures/likelihood_ratio_process_2.md +++ b/lectures/likelihood_ratio_process_2.md @@ -4,7 +4,7 @@ jupytext: extension: .md format_name: myst format_version: 0.13 - jupytext_version: 1.17.1 + jupytext_version: 1.16.6 kernelspec: display_name: Python 3 (ipykernel) language: python @@ -1349,10 +1349,10 @@ Let's implement this three-model case with two agents having different beliefs. First, let's define $f$ and $g$ far apart, with $h$ being a mixture of $f$ and $g$. ```{code-cell} ipython3 -F_a, F_b = 1, 1 -G_a, G_b = 3, 1.2 +F_a, F_b = 3, 2 +G_a, G_b = 2, 3 λ = 0.5 -π_f_0 = 0.4 +π_f_0 = 0.5 f = jit(lambda x: p(x, F_a, F_b)) g = jit(lambda x: p(x, G_a, G_b)) @@ -1407,46 +1407,101 @@ def compute_posterior_three_models( Let's also write simulation code along the lines of earlier exercises ```{code-cell} ipython3 +@jit +def bayesian_update_three_models(π_f_0, π_g_0, L_f, L_g, L_h): + """Bayesian update for three models.""" + π_h_0 = 1 - π_f_0 - π_g_0 + denom = π_f_0 * L_f + π_g_0 * L_g + π_h_0 * L_h + return π_f_0 * L_f / denom, π_g_0 * L_g / denom, π_h_0 * L_h / denom + +@jit +def compute_mixture_density(π_f, π_g, π_h, f_val, g_val, h_val): + """Compute mixture density for an agent.""" + return π_f * f_val + π_g * g_val + π_h * h_val + @jit(parallel=True) -def simulate_three_model_allocation(s_seq, f_func, g_func, h_func, - π_f_1, π_g_1, π_f_2, π_g_2, λ=0.5): +def simulate_three_model_allocation(sequences, f_func, g_func, h_func, + π_f_0_1, π_g_0_1, π_f_0_2, π_g_0_2, λ=0.5): """ - Simulate two agents having beliefs over three models. + Simulate Blume-Easley model with learning agents and three models. 
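+
+    Parameters
+    ----------
+    sequences : ndarray of shape (N, T)
+        Simulated observation paths, one row per path.
+    f_func, g_func, h_func : callable
+        Density functions for models f, g and h.
+    π_f_0_1, π_g_0_1 : float
+        Agent 1's prior probabilities on f and g (the prior on h is the
+        remaining mass).
+    π_f_0_2, π_g_0_2 : float
+        Agent 2's prior probabilities on f and g.
+    λ : float
+        Weight on agent 1 in the allocation rule; it equals agent 1's
+        consumption share at t=0.
+
+    Returns
+    -------
+    dict
+        Posterior paths for both agents ('π_f_1', ..., 'π_h_2'), agent 1's
+        consumption share ('c1_share'), and the cumulative likelihood ratio
+        between the agents' mixture models ('l_agents').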
""" + N, T = sequences.shape - N, T = s_seq.shape - - # Compute posteriors for both agents - π_f_1_seq, π_g_1_seq, π_h_1_seq = compute_posterior_three_models( - s_seq, f_func, g_func, h_func, π_f_1, π_g_1) - π_f_2_seq, π_g_2_seq, π_h_2_seq = compute_posterior_three_models( - s_seq, f_func, g_func, h_func, π_f_2, π_g_2) + # Initialize arrays to store results + beliefs_1 = {k: np.full((N, T), np.nan) for k in ['π_f', 'π_g', 'π_h']} + beliefs_2 = {k: np.full((N, T), np.nan) for k in ['π_f', 'π_g', 'π_h']} + c1_share = np.full((N, T), np.nan) + l_agents_seq = np.full((N, T), np.nan) - # Compute consumption shares - c1_share = np.zeros((N, T)) + # Set initial beliefs + beliefs_1['π_f'][:, 0] = π_f_0_1 + beliefs_1['π_g'][:, 0] = π_g_0_1 + beliefs_1['π_h'][:, 0] = 1 - π_f_0_1 - π_g_0_1 + beliefs_2['π_f'][:, 0] = π_f_0_2 + beliefs_2['π_g'][:, 0] = π_g_0_2 + beliefs_2['π_h'][:, 0] = 1 - π_f_0_2 - π_g_0_2 - for n in prange(N): - l_agents_cumul = 1.0 # Initialize likelihood ratio between agents + for n in range(N): + # Initialize cumulative likelihoods + L_cumul = {'f': 1.0, 'g': 1.0, 'h': 1.0} + l_agents_cumul = 1.0 + + # Calculate initial consumption share at t=0 + # (before any observations, likelihood ratio = 1) + l_agents_seq[n, 0] = 1.0 + c1_share[n, 0] = λ * 1.0 / (1 - λ + λ * 1.0) # This equals λ - for t in range(T): - # Agent 1's mixture density - m1_t = (π_f_1_seq[n, t] * f_func(s_seq[n, t]) + - π_g_1_seq[n, t] * g_func(s_seq[n, t]) + - π_h_1_seq[n, t] * h_func(s_seq[n, t])) + for t in range(1, T): + s_t = sequences[n, t] + + # Compute densities for current observation + densities = { + 'f': f_func(s_t), + 'g': g_func(s_t), + 'h': h_func(s_t) + } + + # Update cumulative likelihoods + for model in L_cumul: + L_cumul[model] *= densities[model] + + # Bayesian updates for both agents + π_f_1, π_g_1, π_h_1 = bayesian_update_three_models( + π_f_0_1, π_g_0_1, L_cumul['f'], L_cumul['g'], L_cumul['h']) + π_f_2, π_g_2, π_h_2 = bayesian_update_three_models( + π_f_0_2, π_g_0_2, L_cumul['f'], L_cumul['g'], L_cumul['h']) + + # Store beliefs + beliefs_1['π_f'][n, t] = π_f_1 + beliefs_1['π_g'][n, t] = π_g_1 + beliefs_1['π_h'][n, t] = π_h_1 + beliefs_2['π_f'][n, t] = π_f_2 + beliefs_2['π_g'][n, t] = π_g_2 + beliefs_2['π_h'][n, t] = π_h_2 - # Agent 2's mixture density - m2_t = (π_f_2_seq[n, t] * f_func(s_seq[n, t]) + - π_g_2_seq[n, t] * g_func(s_seq[n, t]) + - π_h_2_seq[n, t] * h_func(s_seq[n, t])) + # Compute mixture densities + m1_t = compute_mixture_density( + π_f_1, π_g_1, π_h_1, densities['f'], densities['g'], densities['h']) + m2_t = compute_mixture_density( + π_f_2, π_g_2, π_h_2, densities['f'], densities['g'], densities['h']) - # Update likelihood ratio between agents - if t > 0: - l_agents_cumul *= (m1_t / m2_t) + # Update cumulative likelihood ratio between agents + l_agents_cumul *= (m1_t / m2_t) + l_agents_seq[n, t] = l_agents_cumul # Consumption share for agent 1 c1_share[n, t] = λ * l_agents_cumul / (1 - λ + λ * l_agents_cumul) - return c1_share, π_f_1_seq, π_g_1_seq, π_h_1_seq, π_f_2_seq, π_g_2_seq, π_h_2_seq + return { + 'π_f_1': beliefs_1['π_f'], + 'π_g_1': beliefs_1['π_g'], + 'π_h_1': beliefs_1['π_h'], + 'π_f_2': beliefs_2['π_f'], + 'π_g_2': beliefs_2['π_g'], + 'π_h_2': beliefs_2['π_h'], + 'c1_share': c1_share, + 'l_agents': l_agents_seq + } ``` The following code cell defines a plotting function to show evolutions of beliefs and consumption ratios @@ -1454,82 +1509,71 @@ The following code cell defines a plotting function to show evolutions of belief ```{code-cell} ipython3 :tags: 
[hide-input] -def plot_belief_evolution(π_data, nature_labels, figsize=(15, 5)): +def plot_belief_evolution(results, nature='f', figsize=(15, 5)): """ Create plots showing belief evolution for three models (f, g, h) for both agents. - Each row corresponds to a different nature scenario. """ - n_scenarios = len(nature_labels) - fig, axes = plt.subplots(n_scenarios, 3, figsize=figsize) - if n_scenarios == 1: - axes = axes.reshape(1, 3) + fig, axes = plt.subplots(1, 3, figsize=figsize) model_names = ['f', 'g', 'h'] + belief_keys = [('π_f_1', 'π_f_2'), ('π_g_1', 'π_g_2'), ('π_h_1', 'π_h_2')] - for i, (nature_label, π_tuple) in enumerate(zip(nature_labels, π_data)): - πf1, πg1, πh1, πf2, πg2, πh2 = π_tuple - π_data_models = [(πf1, πf2), (πg1, πg2), (πh1, πh2)] + for j, (model_name, (key1, key2)) in enumerate(zip(model_names, belief_keys)): + ax = axes[j] - for j, (model_name, (π1, π2)) in enumerate(zip(model_names, π_data_models)): - ax = axes[i, j] - - # Plot agent beliefs - ax.plot(np.median(π1, axis=0), 'C0-', linewidth=2, label='agent 1') - ax.plot(np.median(π2, axis=0), 'C1-', linewidth=2, label='agent 2') - - # Truth indicator - if nature_label == model_name: - ax.axhline(y=1.0, color='grey', linestyle='-.', - alpha=0.7, label='truth') - else: - ax.axhline(y=0.0, color='grey', linestyle='-.', - alpha=0.7, label='truth') - - ax.set_title(f'π({model_name}) when Nature = {nature_label}') - ax.set_xlabel('$t$') - ax.set_ylabel(f'median π({model_name})') - ax.set_ylim([-0.01, 1.01]) - ax.legend(loc='best') + # Plot agent beliefs + ax.plot(np.median(results[key1], axis=0), 'C0-', linewidth=2, label='agent 1') + ax.plot(np.median(results[key2], axis=0), 'C1-', linewidth=2, label='agent 2') + + # Truth indicator + if model_name == nature: + ax.axhline(y=1.0, color='grey', linestyle='-.', + alpha=0.7, label='truth') + else: + ax.axhline(y=0.0, color='grey', linestyle='-.', + alpha=0.7, label='truth') + + ax.set_title(f'π({model_name}) when Nature = {nature}') + ax.set_xlabel('$t$') + ax.set_ylabel(f'median π({model_name})') + ax.set_ylim([-0.01, 1.01]) + ax.legend(loc='best') plt.tight_layout() return fig, axes -def plot_consumption_dynamics(c1_data, nature_labels, λ=0.5, figsize=(12, 4)): +def plot_consumption_dynamics(results_f, results_g, λ=0.5, figsize=(14, 5)): """ - Create plots showing consumption share dynamics for agent 1. + Create plot showing consumption share dynamics for agent 1 for both nature states. 
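+
+    Parameters
+    ----------
+    results_f, results_g : dict
+        Outputs of ``simulate_three_model_allocation`` when nature draws
+        from f and from g; only the 'c1_share' array is used here.
+    λ : float
+        Pareto weight on agent 1 (agent 1's initial consumption share).
+    figsize : tuple
+        Figure size passed to ``plt.subplots``.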
""" - n_scenarios = len(nature_labels) - fig, axes = plt.subplots(1, n_scenarios, figsize=figsize) - if n_scenarios == 1: - axes = [axes] + fig, axes = plt.subplots(1, 2, figsize=figsize) - colors = ['blue', 'green', 'orange'] + results_list = [results_f, results_g] + nature_labels = ['f', 'g'] + colors = ['blue', 'green'] - for i, (nature_label, c1) in enumerate(zip(nature_labels, c1_data)): + for i, (results, nature_label, color) in enumerate(zip(results_list, nature_labels, colors)): ax = axes[i] + c1 = results['c1_share'] c1_med = np.median(c1, axis=0) # Plot median and percentiles - ax.plot(c1_med, color=colors[i % len(colors)], - linewidth=2, label="median") + ax.plot(c1_med, color=color, linewidth=2, label="median") # Add percentile bands c1_25 = np.percentile(c1, 25, axis=0) c1_75 = np.percentile(c1, 75, axis=0) ax.fill_between(range(len(c1_med)), c1_25, c1_75, - color=colors[i % len(colors)], alpha=0.2, - label="25-75 percentile") + color=color, alpha=0.2, label="25-75 percentile") ax.axhline(y=0.5, color='grey', linestyle='--', alpha=0.5, label='equal share') - ax.axhline(y=λ, color='red', linestyle=':', - alpha=0.5, label=f'initial share (λ={λ})') ax.set_title(f'Agent 1 consumption share (Nature = {nature_label})') ax.set_xlabel('$t$') ax.set_ylabel("consumption share") - ax.set_ylim([-0.01, 1.01]) + ax.set_ylim([-0.02, 1.02]) ax.legend(loc='best') plt.tight_layout() @@ -1548,62 +1592,57 @@ N = 1000 s_seq_f = np.random.beta(F_a, F_b, (N, T)) s_seq_g = np.random.beta(G_a, G_b, (N, T)) +# Run simulations results_f = simulate_three_model_allocation(s_seq_f, f, g, h, π_f_0, 1-π_f_0, 1/3, 1/3, λ) results_g = simulate_three_model_allocation(s_seq_g, f, g, h, π_f_0, 1-π_f_0, 1/3, 1/3, λ) +``` -c1_data = [results_f[0], results_g[0]] -π_data = [results_f[1:], results_g[1:]] -nature_labels = ['f', 'g'] +Plots below show the evolution of beliefs for each model (f, g, h) separately. -plot_belief_evolution(π_data, nature_labels, figsize=(15, 5*len(nature_labels))) -plt.plot(); -``` +First we show the figure when nature chooses $f$ -These plots show the evolution of beliefs for each model (f, g, h) separately. +```{code-cell} ipython3 +plot_belief_evolution(results_f, nature='f', figsize=(15, 5)) +plt.show() +``` Agent 1's posterior probabilities are depicted in blue and agent 2's posterior beliefs are depicted in orange. -The top panel shows outcomes when nature draws from $f$. - Evidently, when nature draws from $f$, agent 1 learns faster than agent 2, who, unlike agent 1, attaches a positive prior probability to model $h$: - In the leftmost panel, both agents' beliefs for $\pi(f)$ converge toward 1 (the truth) -- Agent 1 learns faster than agent 2, who initially assigns probability to model $h$ -- Agent 2's belief in model $h$ (rightmost panel) gradually converges to 0 - - -The bottom panel depicts outcomes when nature draws from $g$. +- Agent 1 learns faster than agent 2 +- Agent 2's belief in model $h$ (rightmost panel) gradually converges to 0 after an initial rise -Again, agent 1 learns faster than agent 2, who, unlike agent 1, attaches some prior probability to model $h$: +Now let's plot the belief evolution when nature = g: -- In the middle panel, both agents' beliefs for $\pi(g)$ converge toward 1 (the truth) -- Again, agent 1 learns faster due to not considering model $h$ initially -- Agent 2's belief in model $h$ converges to 0 over time - -In both panels, agent 2's posterior probability attached to $h$ (dotted line) converges to 0. 
+```{code-cell} ipython3 +plot_belief_evolution(results_g, nature='g', figsize=(15, 5)) +plt.show() +``` +Again, agent 1 learns faster than agent 2: Note the difference in the convergence speed when nature draws from $f$ and $g$. The time it takes for agent 2 to "catch up" is longer when nature draws from $g$. -Agent 1 converges faster because it only needs to update beliefs between two models ($f$ and $g$), while agent 2 must also rule out model $h$. - +This is because agent 1's prior is closer to the truth when nature draws from $g$ Before reading the next figure, please guess how consumption shares evolve. -Remember that agent 1 reaches the correct model faster than agent 2. +Remember that agent 1 reaches the correct model faster than agent 2 ```{code-cell} ipython3 -plot_consumption_dynamics(c1_data, nature_labels, λ=0.5, figsize=(12, 6)) +plot_consumption_dynamics(results_f, results_g, λ=0.5, figsize=(14, 5)) plt.show() ``` -This plot shows the consumption share dynamics. Notice that when nature uses model $f$, the consumption share of agent 1 is only temporarily higher than 0.5, while when nature uses model $g$, agent 1's consumption share is permanently higher. +As we expected, agent 1 has a higher consumption share compared to agent 2. In this exercise, the "truth" is among possible outcomes according to both agents. @@ -1611,10 +1650,6 @@ Agent 2's model is "more general" because it allows a possibility -- that nature Agent 1 learns more quickly because he uses a simpler model. -It would be interesting to explore why agent 1's consumption allocation when $f$ generates the data is only temporarily higher than agent 2's, while when $g$ generates the data, it is permanently higher. - - * Hint: Somehow the KL divergence should be able to help us sort this out. - ```{solution-end} ``` @@ -1640,7 +1675,6 @@ Please simulate and visualize evolutions of posterior probabilities and consum :class: dropdown ``` - To explore this exercise, we increase $T$ to 1000. Let's specify $f, g$, and $h$ and verify that $h$ and $f$ are closer than $h$ and $g$ @@ -1701,29 +1735,38 @@ results_g = simulate_three_model_allocation( s_seq_g, f, g, h, π_f_1, π_g_1, π_f_2, π_g_2, λ) +``` -c1_data = [results_f[0], results_g[0]] -π_data = [results_f[1:], results_g[1:]] -nature_labels = ['f', 'g'] +Let's plot the belief evolution when nature chooses $f$ -plot_belief_evolution(π_data, nature_labels, figsize=(15, 5*len(nature_labels))) -plt.plot(); +```{code-cell} ipython3 +plot_belief_evolution(results_f, nature='f', figsize=(15, 5)) +plt.show() ``` -When nature draws from $f$ (top row), observe how slowly agent 1 learns the truth in the leftmost panel showing $\pi(f)$. +Observe how slowly agent 1 learns the truth in the leftmost panel showing $\pi(f)$. -The posterior probability that agent 1 puts on $h$ (rightmost panel) converges to zero slowly. +Also note that agent 2 is not updating. This is because we have specified that $f$ is very difficult to distinguish from $h$ as measured by $KL(f, h)$. -When it comes to agent 2, the belief remains stationary at 0 and does not converge to the true model because of its rigidity regarding $h$, and $f$ is very difficult to distinguish from $h$. 
+The rigidity regarding $h$ prevents agent 2 from updating its beliefs when observing +a very similar model $f$ + +Now let's plot the belief evolution when nature chooses $g$ + +```{code-cell} ipython3 +plot_belief_evolution(results_g, nature='g', figsize=(15, 5)) +plt.show() +``` -When nature draws from $g$ (bottom row), we have specified things so that $g$ is further away from $h$ as measured by the KL divergence. +When nature draws from $g$, it is further away from $h$ as measured by the KL divergence. -This helps both agents learn the truth more quickly, as seen in the middle panel showing $\pi(g)$. +This helps both agents learn the truth more quickly. ```{code-cell} ipython3 -plot_consumption_dynamics(c1_data, nature_labels, λ=0.5, figsize=(12, 6)) +plot_consumption_dynamics(results_f, results_g, + λ=0.5, figsize=(14, 5)) plt.show() ``` From b7872593f980ca9f212a2ca90ca8bb20fe46cb7c Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Mon, 25 Aug 2025 09:06:04 +0200 Subject: [PATCH 13/14] minor updates --- lectures/likelihood_ratio_process_2.md | 56 ++++++++++++-------------- 1 file changed, 25 insertions(+), 31 deletions(-) diff --git a/lectures/likelihood_ratio_process_2.md b/lectures/likelihood_ratio_process_2.md index a0f2668e8..f0ea199de 100644 --- a/lectures/likelihood_ratio_process_2.md +++ b/lectures/likelihood_ratio_process_2.md @@ -1013,8 +1013,6 @@ Please write Python code that answers the following questions. :class: dropdown ``` - - First, let's write helper functions that compute model components including each agent's subjective belief function. ```{code-cell} ipython3 @@ -1312,9 +1310,9 @@ We'll consider two agents: * Agent 2: $\pi^g_0 = \pi^f_0 = 1/3$, $\pi^h_0 = 1/3$ (attaches equal weights to all three models) -Let $f$ and $g$ be two beta distributions with $f \sim \text{Beta}(1, 1)$ and -$g \sim \text{Beta}(3, 1.2)$, and -set $h = \pi^f_0 f + (1-\pi^f_0) g$. +Let $f$ and $g$ be two beta distributions with $f \sim \text{Beta}(3, 2)$ and +$g \sim \text{Beta}(2, 3)$, and +set $h = \pi^f_0 f + (1-\pi^f_0) g$ with $\pi^f_0 = 0.5$. Bayes' Law tells us that posterior probabilities on models $f$ and $g$ evolve according to @@ -1335,9 +1333,6 @@ Please simulate and visualize evolutions of posterior probabilities and consum * Nature permanently draws from $f$ * Nature permanently draws from $g$ - - - ``` ```{solution-start} lr_ex6 @@ -1346,7 +1341,7 @@ Please simulate and visualize evolutions of posterior probabilities and consum Let's implement this three-model case with two agents having different beliefs. -First, let's define $f$ and $g$ far apart, with $h$ being a mixture of $f$ and $g$. +Let's define $f$ and $g$ far apart, with $h$ being a mixture of $f$ and $g$. 
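It may help to see the posterior formula from the exercise statement applied once by hand before running the full simulation. The sketch below is illustrative: it assumes the Beta(3, 2) and Beta(2, 3) parameters and the 50-50 mixture that the next cell sets, the priors (1/2, 1/2, 0) for agent 1 and (1/3, 1/3, 1/3) for agent 2 described above, and an arbitrary first draw $s_1 = 0.7$.

```python
# One-step Bayesian update under the three-model formula for a single
# hypothetical draw s_1 = 0.7 (values chosen for illustration only).
from scipy.stats import beta

s1 = 0.7
f1 = beta.pdf(s1, 3, 2)      # f density at s_1
g1 = beta.pdf(s1, 2, 3)      # g density at s_1
h1 = 0.5 * f1 + 0.5 * g1     # h is the 50-50 mixture of f and g

def one_step_posterior(π_f_0, π_g_0):
    π_h_0 = 1 - π_f_0 - π_g_0
    denom = π_f_0 * f1 + π_g_0 * g1 + π_h_0 * h1
    return π_f_0 * f1 / denom, π_g_0 * g1 / denom, π_h_0 * h1 / denom

print("agent 1 (priors 1/2, 1/2, 0):  ", one_step_posterior(0.5, 0.5))
print("agent 2 (priors 1/3, 1/3, 1/3):", one_step_posterior(1/3, 1/3))
```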
```{code-cell} ipython3 F_a, F_b = 3, 2 @@ -1447,7 +1442,6 @@ def simulate_three_model_allocation(sequences, f_func, g_func, h_func, l_agents_cumul = 1.0 # Calculate initial consumption share at t=0 - # (before any observations, likelihood ratio = 1) l_agents_seq[n, 0] = 1.0 c1_share[n, 0] = λ * 1.0 / (1 - λ + λ * 1.0) # This equals λ @@ -1481,9 +1475,11 @@ def simulate_three_model_allocation(sequences, f_func, g_func, h_func, # Compute mixture densities m1_t = compute_mixture_density( - π_f_1, π_g_1, π_h_1, densities['f'], densities['g'], densities['h']) + π_f_1, π_g_1, π_h_1, densities['f'], + densities['g'], densities['h']) m2_t = compute_mixture_density( - π_f_2, π_g_2, π_h_2, densities['f'], densities['g'], densities['h']) + π_f_2, π_g_2, π_h_2, densities['f'], + densities['g'], densities['h']) # Update cumulative likelihood ratio between agents l_agents_cumul *= (m1_t / m2_t) @@ -1511,19 +1507,24 @@ The following code cell defines a plotting function to show evolutions of belief def plot_belief_evolution(results, nature='f', figsize=(15, 5)): """ - Create plots showing belief evolution for three models (f, g, h) for both agents. + Create plots showing belief evolution for three models (f, g, h). """ fig, axes = plt.subplots(1, 3, figsize=figsize) model_names = ['f', 'g', 'h'] - belief_keys = [('π_f_1', 'π_f_2'), ('π_g_1', 'π_g_2'), ('π_h_1', 'π_h_2')] + belief_keys = [('π_f_1', 'π_f_2'), + ('π_g_1', 'π_g_2'), + ('π_h_1', 'π_h_2')] - for j, (model_name, (key1, key2)) in enumerate(zip(model_names, belief_keys)): + for j, (model_name, (key1, key2)) in enumerate( + zip(model_names, belief_keys)): ax = axes[j] # Plot agent beliefs - ax.plot(np.median(results[key1], axis=0), 'C0-', linewidth=2, label='agent 1') - ax.plot(np.median(results[key2], axis=0), 'C1-', linewidth=2, label='agent 2') + ax.plot(np.median(results[key1], axis=0), 'C0-', + linewidth=2, label='agent 1') + ax.plot(np.median(results[key2], axis=0), 'C1-', + linewidth=2, label='agent 2') # Truth indicator if model_name == nature: @@ -1545,7 +1546,7 @@ def plot_belief_evolution(results, nature='f', figsize=(15, 5)): def plot_consumption_dynamics(results_f, results_g, λ=0.5, figsize=(14, 5)): """ - Create plot showing consumption share dynamics for agent 1 for both nature states. + Create plot showing consumption share dynamics for agent 1. """ fig, axes = plt.subplots(1, 2, figsize=figsize) @@ -1553,7 +1554,8 @@ def plot_consumption_dynamics(results_f, results_g, λ=0.5, figsize=(14, 5)): nature_labels = ['f', 'g'] colors = ['blue', 'green'] - for i, (results, nature_label, color) in enumerate(zip(results_list, nature_labels, colors)): + for i, (results, nature_label, color) in enumerate( + zip(results_list, nature_labels, colors)): ax = axes[i] c1 = results['c1_share'] c1_med = np.median(c1, axis=0) @@ -1610,28 +1612,21 @@ plot_belief_evolution(results_f, nature='f', figsize=(15, 5)) plt.show() ``` -Agent 1's posterior probabilities are depicted in blue and agent 2's posterior beliefs are depicted in orange. +Agent 1's posterior beliefs are depicted in blue and agent 2's posterior beliefs are depicted in orange. 
Evidently, when nature draws from $f$, agent 1 learns faster than agent 2, who, unlike agent 1, attaches a positive prior probability to model $h$: - In the leftmost panel, both agents' beliefs for $\pi(f)$ converge toward 1 (the truth) -- Agent 1 learns faster than agent 2 - Agent 2's belief in model $h$ (rightmost panel) gradually converges to 0 after an initial rise -Now let's plot the belief evolution when nature = g: +Now let's plot the belief evolution when nature chooses $g$: ```{code-cell} ipython3 plot_belief_evolution(results_g, nature='g', figsize=(15, 5)) plt.show() ``` -Again, agent 1 learns faster than agent 2: - -Note the difference in the convergence speed when nature draws from $f$ and $g$. - -The time it takes for agent 2 to "catch up" is longer when nature draws from $g$. - -This is because agent 1's prior is closer to the truth when nature draws from $g$ +Again, agent 1 learns faster than agent 2. Before reading the next figure, please guess how consumption shares evolve. @@ -1663,7 +1658,7 @@ Consider the same setup as the previous exercise, but now: * Agent 2: $\pi^g_0 = \pi^f_0 = 0$ (rigid belief in model $h$) Choose $h$ to be close but not equal to either $f$ or $g$ as measured by KL divergence. -For example, set $h \sim \text{Beta}(1.2, 1.1)$. +For example, set $h \sim \text{Beta}(1.2, 1.1)$ and $f \sim \text{Beta}(1, 1)$. Please simulate and visualize evolutions of posterior probabilities and consumption allocations when: @@ -1701,7 +1696,6 @@ print(f"KL(f,h) = {Kf_h:.4f}, KL(g,h) = {Kg_h:.4f}") Now we can set the belief models for the two agents ```{code-cell} ipython3 -# Set extreme priors ε = 0.01 λ = 0.5 From 72a2178bdecab21a16d560398e9c8c0fd95f697f Mon Sep 17 00:00:00 2001 From: Humphrey Yang Date: Mon, 25 Aug 2025 09:47:46 +0200 Subject: [PATCH 14/14] remove redundent imports --- lectures/likelihood_ratio_process_2.md | 7 ------- 1 file changed, 7 deletions(-) diff --git a/lectures/likelihood_ratio_process_2.md b/lectures/likelihood_ratio_process_2.md index f0ea199de..fdff7ec2c 100644 --- a/lectures/likelihood_ratio_process_2.md +++ b/lectures/likelihood_ratio_process_2.md @@ -26,7 +26,6 @@ kernelspec: :depth: 2 ``` -(overview)= ## Overview A likelihood ratio process lies behind Lawrence Blume and David Easley's answer to their question @@ -56,8 +55,6 @@ You can learn about how the two welfare theorems are applied in modern macroecon ``` - - Let's start by importing some Python tools. ```{code-cell} ipython3 @@ -66,10 +63,6 @@ import numpy as np from numba import vectorize, jit, prange from math import gamma from scipy.integrate import quad -from scipy.optimize import brentq, minimize_scalar -import pandas as pd -from IPython.display import display, Math -import quantecon as qe ``` ## Review: Likelihood Ratio Processes
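The last hunk drops several unused imports. A self-contained check along the following lines can confirm that the removed names do not appear elsewhere in the lecture source; the file path is the one shown in the diff, and the name list mirrors the deleted import lines.

```python
# Scan the lecture source for identifiers whose imports were removed.
# If each name only ever appeared on its own import line, the removal is safe.
from pathlib import Path

removed_names = ["brentq", "minimize_scalar", "pd.", "display", "Math", "qe."]
src = Path("lectures/likelihood_ratio_process_2.md").read_text(encoding="utf-8")

for name in removed_names:
    hits = [i + 1 for i, line in enumerate(src.splitlines()) if name in line]
    print(f"{name}: lines {hits}" if hits else f"{name}: no remaining occurrences")
```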